Example No. 1
int mali_set_level(struct device *dev, int level)
{
	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
	unsigned long freq;
	int ret;
	unsigned int current_level;

	_mali_osk_mutex_wait(drv_data->clockSetlock);

	current_level = drv_data->dvfs.current_level;
	freq = drv_data->fv_info[level].freq;

	if (level == current_level) {
		_mali_osk_mutex_signal(drv_data->clockSetlock);
		return 0;
	}

	ret = dvfs_clk_set_rate(drv_data->clk, freq);
	if (ret) {
		_mali_osk_mutex_signal(drv_data->clockSetlock);
		return ret;
	}

	dev_dbg(dev, "set freq %lu\n", freq);

	drv_data->dvfs.current_level = level;

	_mali_osk_mutex_signal(drv_data->clockSetlock);

	return 0;
}
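A minimal sketch (not the driver's code) of the same clock-level change written with a single unlock point, so every return path releases clockSetlock; all names are taken from the example above and the dev_dbg trace is omitted:

int mali_set_level_single_exit(struct device *dev, int level)
{
	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
	int ret = 0;

	_mali_osk_mutex_wait(drv_data->clockSetlock);

	/* Only touch the clock when the requested level differs from the current one. */
	if (level != drv_data->dvfs.current_level) {
		ret = dvfs_clk_set_rate(drv_data->clk, drv_data->fv_info[level].freq);
		if (0 == ret)
			drv_data->dvfs.current_level = level;
	}

	_mali_osk_mutex_signal(drv_data->clockSetlock);
	return ret;
}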
_mali_osk_errcode_t _mali_ukk_release_ump_mem(_mali_uk_release_ump_mem_s *args)
{
    mali_mem_allocation * descriptor;
    struct mali_session_data *session;

    MALI_DEBUG_ASSERT_POINTER(args);
    MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

    session = (struct mali_session_data *)args->ctx;
    MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

    if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session->descriptor_mapping, args->cookie, (void**)&descriptor)) {
        MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
        MALI_ERROR(_MALI_OSK_ERR_FAULT);
    }

    descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args->cookie);

    if (NULL != descriptor) {
        _mali_osk_mutex_wait(session->memory_lock);
        mali_mem_ump_release(descriptor);
        _mali_osk_mutex_signal(session->memory_lock);

        mali_mem_descriptor_destroy(descriptor);
    }

    MALI_SUCCESS;
}
Example No. 3
void mali_memory_session_end(struct mali_session_data *session)
{
	MALI_DEBUG_PRINT(3, ("MMU session end\n"));

	if (NULL == session) {
		MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
		return;
	}

	/* Lock the session so we can modify the memory list */
	_mali_osk_mutex_wait(session->memory_lock);

	/* Free all allocations still in the descriptor map, and terminate the map */
	if (NULL != session->descriptor_mapping) {
		mali_descriptor_mapping_call_for_each(session->descriptor_mapping, descriptor_table_cleanup_callback);
		mali_descriptor_mapping_destroy(session->descriptor_mapping);
		session->descriptor_mapping = NULL;
	}

	_mali_osk_mutex_signal(session->memory_lock);

	/* Free the lock */
	_mali_osk_mutex_term(session->memory_lock);

	return;
}
Example No. 4
/* Can NOT run in atomic context */
_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
{
#ifdef CONFIG_PM_RUNTIME
    int err;
    MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
    err = pm_runtime_get_sync(&(mali_platform_device->dev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
    pm_runtime_mark_last_busy(&(mali_platform_device->dev));
#endif
    if (0 > err) {
        MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
        return _MALI_OSK_ERR_FAULT;
    }
#else
    _mali_osk_mutex_wait(mtk_pm_lock);
    mali_platform_power_mode_change(MALI_POWER_MODE_ON);
    if (_mali_osk_atomic_read(&mtk_mali_suspend_called))
    {
        mali_pm_runtime_resume();
        _mali_osk_atomic_dec(&mtk_mali_suspend_called);
    }
    _mali_osk_atomic_inc(&mtk_mali_pm_ref_count);
    _mali_osk_mutex_signal(mtk_pm_lock);
#endif
    return _MALI_OSK_ERR_OK;
}
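The matching reference-release path is not part of this excerpt; below is a hedged sketch of what it would look like under the same CONFIG_PM_RUNTIME split (the function name is illustrative and the real driver's counterpart may differ):

/* Illustrative only: drop the runtime-PM reference taken by the get_sync call above. */
void mali_osk_pm_dev_ref_put_sketch(void)
{
#ifdef CONFIG_PM_RUNTIME
	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
	pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
	pm_runtime_put(&(mali_platform_device->dev));
#endif
#else
	_mali_osk_mutex_wait(mtk_pm_lock);
	_mali_osk_atomic_dec(&mtk_mali_pm_ref_count);
	_mali_osk_mutex_signal(mtk_pm_lock);
#endif
}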
Example No. 5
static void mali_mem_vma_close(struct vm_area_struct *vma)
{
	mali_mem_allocation *descriptor;
	struct mali_session_data *session;
	mali_mem_virt_cpu_mapping *mapping;

	MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));

	descriptor = (mali_mem_allocation *)vma->vm_private_data;
	BUG_ON(!descriptor);

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	mapping = &descriptor->cpu_mapping;
	BUG_ON(0 == mapping->ref);

	mapping->ref--;
	if (0 != mapping->ref) {
		MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", mapping->ref));
		return;
	}

	session = descriptor->session;

	mali_descriptor_mapping_free(session->descriptor_mapping, descriptor->id);

	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_release(descriptor);
	_mali_osk_mutex_signal(session->memory_lock);

	mali_mem_descriptor_destroy(descriptor);
}
/** @note munmap handler is done by vma close handler */
int mali_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mali_session_data *session;
	mali_mem_allocation *descriptor;
	u32 size = vma->vm_end - vma->vm_start;
	u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;

	session = (struct mali_session_data *)filp->private_data;
	if (NULL == session) {
		MALI_PRINT_ERROR(("mmap called without any session data available\n"));
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
			     (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
			     (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));

	/* Set some bits which indicate that the memory is IO memory, meaning
	 * that no paging is to be performed and the memory should not be
	 * included in crash dumps. And that the memory is reserved, meaning
	 * that it's present and can never be paged out (see also previous
	 * entry)
	 */
	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_PFNMAP;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
#endif

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */

	descriptor = mali_mem_block_alloc(mali_addr, size, vma, session);
	if (NULL == descriptor) {
		descriptor = mali_mem_os_alloc(mali_addr, size, vma, session);
		if (NULL == descriptor) {
			MALI_DEBUG_PRINT(3, ("MMAP failed\n"));
			return -ENOMEM;
		}
	}

	MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);

	vma->vm_private_data = (void *)descriptor;

	/* Put on descriptor map */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_os_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		return -EFAULT;
	}

	return 0;
}
_mali_osk_errcode_t _mali_internal_profiling_start(u32 * limit)
{
	_mali_osk_errcode_t ret;
	mali_profiling_entry *new_profile_entries;

	_mali_osk_mutex_wait(lock);

	if (MALI_PROFILING_STATE_RUNNING == prof_state) {
		_mali_osk_mutex_signal(lock);
		return _MALI_OSK_ERR_BUSY;
	}

	new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));

	if (NULL == new_profile_entries) {
		_mali_osk_mutex_signal(lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
		*limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
	}

	profile_mask = 1;
	while (profile_mask <= *limit) {
		profile_mask <<= 1;
	}
	profile_mask >>= 1;

	*limit = profile_mask;

	profile_mask--; /* turns the power of two into a mask of one less */

	if (MALI_PROFILING_STATE_IDLE != prof_state) {
		_mali_osk_mutex_signal(lock);
		_mali_osk_vfree(new_profile_entries);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	profile_entries = new_profile_entries;

	ret = _mali_timestamp_reset();

	if (_MALI_OSK_ERR_OK == ret) {
		prof_state = MALI_PROFILING_STATE_RUNNING;
	} else {
		_mali_osk_vfree(profile_entries);
		profile_entries = NULL;
	}

	register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);

	_mali_osk_mutex_signal(lock);
	return ret;
}
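The loop above rounds *limit down to a power of two and then turns it into an index mask; a small illustrative helper (not part of the driver) restating that computation, e.g. a requested limit of 300 becomes 256 entries with mask 255:

/* Illustrative restatement of the mask logic in _mali_internal_profiling_start(). */
static u32 profiling_index_mask(u32 limit)
{
	u32 mask = 1;

	while (mask <= limit)
		mask <<= 1;
	mask >>= 1;       /* largest power of two <= limit, e.g. 300 -> 256 */

	return mask - 1;  /* index mask, e.g. 256 -> 255 (0xFF) */
}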
Example No. 8
static void MTK_mali_bottom_half_pm_resume(struct work_struct *work)
{
    _mali_osk_mutex_wait(mtk_pm_lock);
    mali_platform_power_mode_change(MALI_POWER_MODE_ON);
    if (_mali_osk_atomic_read(&mtk_mali_suspend_called))
    {
        mali_pm_runtime_resume();
        _mali_osk_atomic_dec(&mtk_mali_suspend_called);
    }
    _mali_osk_mutex_signal(mtk_pm_lock);
}
u32 _mali_internal_profiling_get_count(void)
{
	u32 retval = 0;

	_mali_osk_mutex_wait(lock);
	if (MALI_PROFILING_STATE_RETURN == prof_state) {
		retval = _mali_osk_atomic_read(&profile_insert_index);
		if (retval > profile_mask) retval = profile_mask;
	}
	_mali_osk_mutex_signal(lock);

	return retval;
}
Example No. 10
static void MTK_mali_bottom_half_pm_suspend(struct work_struct *work)
{
    _mali_osk_mutex_wait(mtk_pm_lock);
    if((_mali_osk_atomic_read(&mtk_mali_pm_ref_count) == 0) &&
       (_mali_osk_atomic_read(&mtk_mali_suspend_called) == 0))
    {
        if (MALI_TRUE == mali_pm_runtime_suspend())
        {
            _mali_osk_atomic_inc(&mtk_mali_suspend_called);
            mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
        }
    }
    _mali_osk_mutex_signal(mtk_pm_lock);
}
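These two bottom halves take a struct work_struct because they must run in process context (they take a mutex). A hedged sketch of how they would typically be wired up with a workqueue; the work-item and wrapper names below are assumptions, not the platform file's actual symbols:

#include <linux/workqueue.h>

/* Assumed names for illustration only. */
static DECLARE_WORK(mtk_pm_suspend_work, MTK_mali_bottom_half_pm_suspend);
static DECLARE_WORK(mtk_pm_resume_work, MTK_mali_bottom_half_pm_resume);

/* Callable from contexts that must not block: defer the mutex-taking work to process context. */
static void mtk_pm_queue_resume(void)
{
	schedule_work(&mtk_pm_resume_work);
}

static void mtk_pm_queue_suspend(void)
{
	schedule_work(&mtk_pm_suspend_work);
}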
Example No. 11
/* Can run in atomic context */
mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
{
#ifdef CONFIG_PM_RUNTIME
	u32 ref;
	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
	pm_runtime_get_noresume(&(mali_platform_device->dev));
	ref = _mali_osk_atomic_read(&mali_pm_ref_count);
	MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
	return ref > 0 ? MALI_TRUE : MALI_FALSE;
#else
	mali_bool ret;

	_mali_osk_mutex_wait(pm_lock);
	ret = (0 == _mali_osk_atomic_read(&mali_suspend_called)) ? MALI_TRUE : MALI_FALSE;
	_mali_osk_mutex_signal(pm_lock);
	return ret;
#endif
}
_mali_osk_errcode_t _mali_internal_profiling_clear(void)
{
	_mali_osk_mutex_wait(lock);

	if (MALI_PROFILING_STATE_RETURN != prof_state) {
		_mali_osk_mutex_signal(lock);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	prof_state = MALI_PROFILING_STATE_IDLE;
	profile_mask = 0;
	_mali_osk_atomic_init(&profile_insert_index, 0);

	if (NULL != profile_entries) {
		_mali_osk_vfree(profile_entries);
		profile_entries = NULL;
	}

	_mali_osk_mutex_signal(lock);
	return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_internal_profiling_stop(u32 * count)
{
	_mali_osk_mutex_wait(lock);

	if (MALI_PROFILING_STATE_RUNNING != prof_state) {
		_mali_osk_mutex_signal(lock);
		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
	}

	/* go into return state (user to retreive events), no more events will be added after this */
	prof_state = MALI_PROFILING_STATE_RETURN;

	unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);

	_mali_osk_mutex_signal(lock);

	tracepoint_synchronize_unregister();

	*count = _mali_osk_atomic_read(&profile_insert_index);
	if (*count > profile_mask) *count = profile_mask;

	return _MALI_OSK_ERR_OK;
}
Example No. 14
void mali_regulator_set_voltage(int min_uV, int max_uV)
{
	int voltage;
#ifndef CONFIG_MALI_DVFS
	min_uV = mali_gpu_vol;
	max_uV = mali_gpu_vol;
#endif

	_mali_osk_mutex_wait(mali_dvfs_lock);

	if (IS_ERR_OR_NULL(g3d_regulator))
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
		_mali_osk_mutex_signal(mali_dvfs_lock);
		return;
	}
	MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));
	regulator_set_voltage(g3d_regulator,min_uV,max_uV);
	voltage = regulator_get_voltage(g3d_regulator);
	mali_gpu_vol = voltage;
	MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d \n",mali_gpu_vol));

	_mali_osk_mutex_signal(mali_dvfs_lock);
}
_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
{
	u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);

	_mali_osk_mutex_wait(lock);

	if (index < profile_mask) {
		if ((raw_index & ~profile_mask) != 0) {
			index += raw_index;
			index &= profile_mask;
		}

		if (prof_state != MALI_PROFILING_STATE_RETURN) {
			_mali_osk_mutex_signal(lock);
			return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
		}

		if(index >= raw_index) {
			_mali_osk_mutex_signal(lock);
			return _MALI_OSK_ERR_FAULT;
		}

		*timestamp = profile_entries[index].timestamp;
		*event_id = profile_entries[index].event_id;
		data[0] = profile_entries[index].data[0];
		data[1] = profile_entries[index].data[1];
		data[2] = profile_entries[index].data[2];
		data[3] = profile_entries[index].data[3];
		data[4] = profile_entries[index].data[4];
	} else {
		_mali_osk_mutex_signal(lock);
		return _MALI_OSK_ERR_FAULT;
	}

	_mali_osk_mutex_signal(lock);
	return _MALI_OSK_ERR_OK;
}
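Taken together, the profiling functions above form a small state machine (IDLE -> RUNNING -> RETURN -> IDLE). A hedged usage sketch of that sequence, with error handling trimmed; the wrapper name is illustrative:

/* Illustrative only: drive the internal profiling state machine end to end. */
static void profiling_dump_sketch(void)
{
	u32 limit = 1024;
	u32 count, i, event_id, data[5];
	u64 timestamp;

	if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit))
		return;

	/* ... workload runs, probe_mali_timeline_event records entries ... */

	if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count))
		return;

	for (i = 0; i < count; i++) {
		if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(i, &timestamp, &event_id, data))
			MALI_DEBUG_PRINT(3, ("event %u: id 0x%x ts %llu\n", i, event_id,
					     (unsigned long long)timestamp));
	}

	_mali_internal_profiling_clear();
}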
Example No. 16
/*
 * IOCTL operation; import an ion fd into UMP memory
 */
int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;

	ump_session_memory_list_element * session_memory_element = NULL;
	if (ion_client_ump==NULL)
	    ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data)
	{
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
	{
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID*/
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	if (IS_ERR_OR_NULL(ion_hnd)) {
		MSG_ERR(("ion_import_fd() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}
	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);

	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ioctl_allocate()\n"));
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i>=1024) {
			_mali_osk_free(blocks);
			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while(sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));

	if (NULL == session_memory_element)
	{
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle)
	{
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);
	ion_unmap_dma(ion_client_ump,ion_hnd);
	ion_free(ion_client_ump, ion_hnd);

	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
	{
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */

		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));

		return -EFAULT;
	}
	return 0; /* success */
}
/**
 * _mali_ukk_mem_allocate() - allocate mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check if the address is already allocated */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);

	if (unlikely(mali_vma_node)) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}
	/* Create the mali memory allocation */

	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* Pick the backend type: MALI_MEM_SWAP for swappable allocations,
	 * MALI_MEM_OS (resizeable) when the allocation must support resizing,
	 * MALI_MEM_BLOCK when dedicated memory is available,
	 * otherwise MALI_MEM_OS.
	 */
	if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
		mali_allocation->type = MALI_MEM_SWAP;
	} else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
		mali_allocation->type = MALI_MEM_OS;
		mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
	} else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
	if (mali_allocation->backend_handle < 0) {
		ret = _MALI_OSK_ERR_NOMEM;
		MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
		goto failed_alloc_backend;
	}


	mem_backend->mali_allocation = mali_allocation;
	mem_backend->type = mali_allocation->type;

	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* set gpu mmu property */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
	/* do prepare for MALI mapping */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);

		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			_mali_osk_mutex_signal(session->memory_lock);
			goto failed_prepare_map;
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	if (mali_allocation->psize == 0) {
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);
		goto done;
	}

	if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
		mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
		mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
		/* init for defer bind backend*/
		mem_backend->os_mem.count = 0;
		INIT_LIST_HEAD(&mem_backend->os_mem.pages);

		goto done;
	}
	/* Allocate physical memory */
	if (likely(mali_allocation->psize > 0)) {

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first, then try OS memory if that fails. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else if (MALI_MEM_SWAP == mem_backend->type) {
			retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
		} else {
			/* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to GPU side */
	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */

		if (mem_backend->type == MALI_MEM_OS) {
			ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
						   mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);

		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_SWAP) {
			ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
						     mali_allocation->mali_mapping.properties);
		} else { /* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		_mali_osk_mutex_signal(session->memory_lock);
	}
done:
	if (MALI_MEM_OS == mem_backend->type) {
		atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
	} else if (MALI_MEM_BLOCK == mem_backend->type) {
		atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
	} else {
		MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
		atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
	}

	if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
		session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
	}
	return _MALI_OSK_ERR_OK;

failed_alloc_pages:
	mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
failed_prepare_map:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
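The backend-type selection near the top of the function above can be summarized by a small illustrative helper; it is not part of the driver, and the mali_mem_type enum name is an assumption inferred from the MALI_MEM_* constants used here:

/* Illustrative only: restates the backend-type policy of _mali_ukk_mem_allocate(). */
static mali_mem_type mali_alloc_pick_type_sketch(u32 flags)
{
	if (flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE)
		return MALI_MEM_SWAP;
	if (flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE)
		return MALI_MEM_OS;        /* resizeable allocations must live in OS memory */
	if (MALI_TRUE == mali_memory_have_dedicated_memory())
		return MALI_MEM_BLOCK;
	return MALI_MEM_OS;
}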
static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
{
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	mali_mem_os_mem tmp_os_mem;
	s32 change_page_count;

	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT_POINTER(mem_backend);
	MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
	MALI_DEBUG_ASSERT(0 == physical_size %  MALI_MMU_PAGE_SIZE);

	mali_allocation = mem_backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(mali_allocation);

	MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
	MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);

	mutex_lock(&mem_backend->mutex);

	/* Do resize*/
	if (physical_size > mem_backend->size) {
		u32 add_size = physical_size - mem_backend->size;

		MALI_DEBUG_ASSERT(0 == add_size %  MALI_MMU_PAGE_SIZE);

		/* Allocate new pages from os mem */
		retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);

		if (retval) {
			if (-ENOMEM == retval) {
				ret = _MALI_OSK_ERR_NOMEM;
			} else {
				ret = _MALI_OSK_ERR_FAULT;
			}
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
			goto failed_alloc_memory;
		}

		MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);

		/* Resize the memory of the backend */
		ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);

		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory	resizing failed !\n"));
			goto failed_resize_pages;
		}

		/*Resize cpu mapping */
		if (NULL != mali_allocation->cpu_mapping.vma) {
			ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start  + mem_backend->size, add_size);
			if (unlikely(ret != _MALI_OSK_ERR_OK)) {
				MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
				goto  failed_cpu_map;
			}
		}

		/* Resize mali mapping */
		_mali_osk_mutex_wait(session->memory_lock);
		ret = mali_mem_mali_map_resize(mali_allocation, physical_size);

		if (ret) {
			MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
			goto failed_gpu_map;
		}

		ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
					   mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
		if (ret) {
			MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
			goto failed_gpu_map;
		}

		_mali_osk_mutex_signal(session->memory_lock);
	} else {
		u32 dec_size, page_count;
		u32 vaddr = 0;
		INIT_LIST_HEAD(&tmp_os_mem.pages);
		tmp_os_mem.count = 0;

		dec_size = mem_backend->size - physical_size;
		MALI_DEBUG_ASSERT(0 == dec_size %  MALI_MMU_PAGE_SIZE);

		page_count = dec_size / MALI_MMU_PAGE_SIZE;
		vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;

		/* Resize the memory of the backend */
		ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);

		if (ret) {
			MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
			goto failed_resize_pages;
		}

		/* Resize mali map */
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
		_mali_osk_mutex_signal(session->memory_lock);

		/* Zap cpu mapping */
		if (0 != mali_allocation->cpu_mapping.addr) {
			MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
			zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
		}

		/* Free those extra pages */
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
	}

	/* Resize memory allocation and memory backend */
	change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
	mali_allocation->psize = physical_size;
	mem_backend->size = physical_size;
	mutex_unlock(&mem_backend->mutex);

	if (change_page_count > 0) {
		atomic_add(change_page_count, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}

	} else {
		atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
failed_cpu_map:
	if (physical_size > mem_backend->size) {
		mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
					 (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
	} else {
		mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
	}
failed_resize_pages:
	if (0 != tmp_os_mem.count)
		mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
failed_alloc_memory:

	mutex_unlock(&mem_backend->mutex);
	return ret;
}
Example No. 19
void mali_pm_exec_lock(void)
{
	_mali_osk_mutex_wait(pm_lock_exec);
}
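The matching unlock is not included in this excerpt; presumably it is the symmetric release of pm_lock_exec (hedged sketch):

void mali_pm_exec_unlock(void)
{
	_mali_osk_mutex_signal(pm_lock_exec);
}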
Example No. 20
_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
{
	ump_session_data *session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element *session_memory_element = NULL;
	int ret;

	DEBUG_ASSERT_POINTER(user_interaction);
	DEBUG_ASSERT_POINTER(user_interaction->ctx);

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element) {
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}


	new_allocation = _mali_osk_calloc(1, sizeof(ump_dd_mem));
	if (NULL == new_allocation) {
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Initialize the part of the new_allocation that we know so far */
	_mali_osk_atomic_init(&new_allocation->ref_count, 1);
	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
		new_allocation->is_cached = 0;
	else new_allocation->is_cached = 1;

	/* Special case a size of 0, we should try to emulate what malloc does
	 * in this case, which is to return a valid pointer that must be freed,
	 * but can't be dereferenced */
	if (0 == user_interaction->size) {
		/* Emulate by actually allocating the minimum block size */
		user_interaction->size = 1;
	}

	/* Page align the size */
	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size);
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
			    new_allocation->size_bytes,
			    (unsigned long)user_interaction->size));
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);

	/* Create a secure ID for this allocation */
	ret = ump_random_mapping_insert(device.secure_id_map, new_allocation);
	if (unlikely(ret)) {
		new_allocation->release_func(new_allocation->ctx, new_allocation);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;
	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
		    new_allocation->secure_id,
		    new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_attach_ump_mem(_mali_uk_attach_ump_mem_s *args)
{
    ump_dd_handle ump_mem;
    struct mali_session_data *session;
    mali_mem_allocation *descriptor;
    int md, ret;

    MALI_DEBUG_ASSERT_POINTER(args);
    MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

    session = (struct mali_session_data *)args->ctx;
    MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

    /* check arguments */
    /* NULL might be a valid Mali address */
    if (!args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

    /* size must be a multiple of the system page size */
    if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

    MALI_DEBUG_PRINT(3,
                     ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
                      args->secure_id, args->mali_address, args->size));

    ump_mem = ump_dd_handle_create_from_secure_id((int)args->secure_id);

    if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);

    descriptor = mali_mem_descriptor_create(session, MALI_MEM_UMP);
    if (NULL == descriptor) {
        ump_dd_reference_release(ump_mem);
        MALI_ERROR(_MALI_OSK_ERR_NOMEM);
    }

    descriptor->ump_mem.handle = ump_mem;
    descriptor->mali_mapping.addr = args->mali_address;
    descriptor->size = args->size;
    descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
    descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;

    if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
        descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
    }

    _mali_osk_mutex_wait(session->memory_lock);

    ret = mali_ump_map(session, descriptor);
    if (0 != ret) {
        _mali_osk_mutex_signal(session->memory_lock);
        ump_dd_reference_release(ump_mem);
        mali_mem_descriptor_destroy(descriptor);
        MALI_ERROR(_MALI_OSK_ERR_NOMEM);
    }

    _mali_osk_mutex_signal(session->memory_lock);


    if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
        ump_dd_reference_release(ump_mem);
        mali_mem_descriptor_destroy(descriptor);
        MALI_ERROR(_MALI_OSK_ERR_FAULT);
    }

    args->cookie = md;

    MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));

    MALI_SUCCESS;
}
Example No. 22
mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	unsigned long rate = 0;
	mali_bool bis_vpll = MALI_TRUE;

#ifndef CONFIG_VPLL_USE_FOR_TVENC
	bis_vpll = MALI_TRUE;
#endif

#ifndef CONFIG_MALI_DVFS
	clk = mali_gpu_clk;
#endif

	_mali_osk_mutex_wait(mali_dvfs_lock);

	if (mali_clk_get(bis_vpll) == MALI_FALSE)
	{
		printk("~~~~~~~~ERROR: [%s] %d\n ", __func__, __LINE__);
		_mali_osk_mutex_signal(mali_dvfs_lock);
		return MALI_FALSE;
	}

	rate = (unsigned long)clk * (unsigned long)mhz;
	MALI_DEBUG_PRINT(2,("= clk_set_rate : %d , %d \n",clk, mhz ));

	if (bis_vpll)
	{
		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
		//clk_set_parent(vpll_src_clock, ext_xtal_clock);
		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}
	else
	{
		clk_set_parent(mali_parent_clock, mpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}

	if (clk_enable(mali_clock) < 0)
	{
		printk("~~~~~~~~ERROR: [%s] %d\n ", __func__, __LINE__);
		mali_clk_put(MALI_FALSE);
		_mali_osk_mutex_signal(mali_dvfs_lock);
		return MALI_FALSE;
	}

	clk_set_rate(mali_clock, rate);
	rate = clk_get_rate(mali_clock);

	if (bis_vpll)
		mali_gpu_clk = (int)(rate / mhz);
	else
		mali_gpu_clk = (int)((rate + 500000) / mhz);

	GPU_MHZ = mhz;
	MALI_DEBUG_PRINT(2,("= clk_get_rate: %d \n",mali_gpu_clk));

	mali_clk_put(MALI_FALSE);

	_mali_osk_mutex_signal(mali_dvfs_lock);

	return MALI_TRUE;
}
_mali_osk_errcode_t _mali_ukk_map_external_mem(_mali_uk_map_external_mem_s *args)
{
	struct mali_session_data *session;
	mali_mem_allocation * descriptor;
	int md;
	_mali_osk_errcode_t err;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

	session = (struct mali_session_data *)args->ctx;
	MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);

	/* check arguments */
	/* NULL might be a valid Mali address */
	if (! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	/* size must be a multiple of the system page size */
	if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	MALI_DEBUG_PRINT(3,
	                 ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
	                  args->phys_addr,
	                  args->phys_addr + args->size - 1,
	                  args->mali_address)
	                );

	/* Validate the mali physical range */
	if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size)) {
		return _MALI_OSK_ERR_FAULT;
	}

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_EXTERNAL);
	if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);

	descriptor->mali_mapping.addr = args->mali_address;
	descriptor->size = args->size;

	if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);
	{
		u32 virt = descriptor->mali_mapping.addr;
		u32 phys = args->phys_addr;
		u32 size = args->size;

		err = mali_mem_mali_map_prepare(descriptor);
		if (_MALI_OSK_ERR_OK != err) {
			_mali_osk_mutex_signal(session->memory_lock);
			mali_mem_descriptor_destroy(descriptor);
			return _MALI_OSK_ERR_NOMEM;
		}

		mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);

		if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
			mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}
	}
	_mali_osk_mutex_signal(session->memory_lock);

	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_external_release(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	args->cookie = md;

	MALI_SUCCESS;
}
Example No. 24
/**
 * _mali_ukk_mem_allocate() - allocate mali memory
 */
_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
{
	struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
	mali_mem_backend *mem_backend = NULL;
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
	int retval = 0;
	mali_mem_allocation *mali_allocation = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));

	/* Check if the address is already allocated.
	 * Can we trust user mode?
	 */
	mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
	if (unlikely(mali_vma_node)) {
		/* Not supported yet */
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Create the mali memory allocation */
	mali_allocation = mali_mem_allocation_struct_create(session);

	if (mali_allocation == NULL) {
		MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
		return _MALI_OSK_ERR_NOMEM;
	}
	mali_allocation->psize = args->psize;
	mali_allocation->vsize = args->vsize;

	/* check if have dedicated memory */
	if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
		mali_allocation->type = MALI_MEM_BLOCK;
	} else {
		mali_allocation->type = MALI_MEM_OS;
	}

	/* Add the allocation node to the RB tree for indexing */
	mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
	mali_allocation->mali_vma_node.vm_node.size = args->vsize;

	mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);

	/* check if need to allocate backend */
	if (mali_allocation->psize == 0)
		return _MALI_OSK_ERR_OK;

	/* Allocate the physical backend & pages */
	if (likely(mali_allocation->psize > 0)) {
		mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
		if (mali_allocation->backend_handle < 0) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
			goto failed_alloc_backend;
		}

		mem_backend->mali_allocation = mali_allocation;
		mem_backend->type = mali_allocation->type;

		if (mem_backend->type == MALI_MEM_OS) {
			retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			/* Try to allocate from BLOCK memory first, then try OS memory if that fails. */
			if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
				retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
				mem_backend->type = MALI_MEM_OS;
				mali_allocation->type = MALI_MEM_OS;
			}
		} else {
			/* unsupported type */
			MALI_DEBUG_ASSERT(0);
		}

		if (retval) {
			ret = _MALI_OSK_ERR_NOMEM;
			MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
			goto failed_alloc_pages;
		}
	}

	/* Map to GPU side */
	mali_allocation->mali_mapping.addr = args->gpu_vaddr;

	/* set gpu mmu property */
	_mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);

	if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
		_mali_osk_mutex_wait(session->memory_lock);
		/* Map on Mali */
		ret = mali_mem_mali_map_prepare(mali_allocation);
		if (0 != ret) {
			MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
			goto failed_gpu_map;
		}
		/* only support os memory type now */
		if (mem_backend->type == MALI_MEM_OS) {
			mali_mem_os_mali_map(mem_backend, args->gpu_vaddr,
					     mali_allocation->mali_mapping.properties);
		} else if (mem_backend->type == MALI_MEM_BLOCK) {
			mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
						mali_allocation->mali_mapping.properties);
		} else {
			/* Not supported yet */
			MALI_DEBUG_ASSERT(0);
		}
		session->mali_mem_array[mem_backend->type] += mem_backend->size;
		if (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated) {
			session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
		}
		_mali_osk_mutex_signal(session->memory_lock);
	}

	return _MALI_OSK_ERR_OK;

failed_gpu_map:
	_mali_osk_mutex_signal(session->memory_lock);
	if (mem_backend->type == MALI_MEM_OS) {
		mali_mem_os_free(&mem_backend->os_mem);
	} else {
		mali_mem_block_free(&mem_backend->block_mem);
	}
failed_alloc_pages:
	mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
failed_alloc_backend:

	mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
	mali_mem_allocation_struct_destory(mali_allocation);

	return ret;
}
mali_mem_allocation *mali_mem_block_alloc(u32 mali_addr, u32 size, struct vm_area_struct *vma, struct mali_session_data *session)
{
	_mali_osk_errcode_t err;
	mali_mem_allocation *descriptor;
	block_allocator *info;
	u32 left;
	block_info *last_allocated = NULL;
	block_allocator_allocation *ret_allocation;
	u32 offset = 0;

	size = ALIGN(size, MALI_BLOCK_SIZE);

	info = mali_mem_block_gobal_allocator;
	if (NULL == info) return NULL;

	left = size;
	MALI_DEBUG_ASSERT(0 != left);

	descriptor = mali_mem_descriptor_create(session, MALI_MEM_BLOCK);
	if (NULL == descriptor) {
		return NULL;
	}

	descriptor->mali_mapping.addr = mali_addr;
	descriptor->size = size;
	descriptor->cpu_mapping.addr = (void __user *)vma->vm_start;
	descriptor->cpu_mapping.ref = 1;

	if (VM_SHARED == (VM_SHARED & vma->vm_flags)) {
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_DEFAULT;
	} else {
		/* Cached Mali memory mapping */
		descriptor->mali_mapping.properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
		vma->vm_flags |= VM_SHARED;
	}

	ret_allocation = &descriptor->block_mem.mem;

	ret_allocation->mapping_length = 0;

	_mali_osk_mutex_wait(session->memory_lock);
	mutex_lock(&info->mutex);

	if (left > (info->free_blocks * MALI_BLOCK_SIZE)) {
		MALI_DEBUG_PRINT(2, ("Mali block allocator: not enough free blocks to service allocation (%u)\n", left));
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	err = mali_mem_mali_map_prepare(descriptor);
	if (_MALI_OSK_ERR_OK != err) {
		mutex_unlock(&info->mutex);
		_mali_osk_mutex_signal(session->memory_lock);
		mali_mem_descriptor_destroy(descriptor);
		return NULL;
	}

	while ((left > 0) && (info->first_free)) {
		block_info *block;
		u32 phys_addr;
		u32 current_mapping_size;

		block = info->first_free;
		info->first_free = info->first_free->next;
		block->next = last_allocated;
		last_allocated = block;

		phys_addr = get_phys(info, block);

		if (MALI_BLOCK_SIZE < left) {
			current_mapping_size = MALI_BLOCK_SIZE;
		} else {
			current_mapping_size = left;
		}

		mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
		if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
			/* release all memory back to the pool */
			while (last_allocated) {
				/* This relinks every block we've just allocated back into the free-list */
				block = last_allocated->next;
				last_allocated->next = info->first_free;
				info->first_free = last_allocated;
				last_allocated = block;
			}

			mutex_unlock(&info->mutex);
			_mali_osk_mutex_signal(session->memory_lock);

			mali_mem_mali_map_free(descriptor);
			mali_mem_descriptor_destroy(descriptor);

			return NULL;
		}

		left -= current_mapping_size;
		offset += current_mapping_size;
		ret_allocation->mapping_length += current_mapping_size;

		--info->free_blocks;
	}

	mutex_unlock(&info->mutex);
	_mali_osk_mutex_signal(session->memory_lock);

	MALI_DEBUG_ASSERT(0 == left);

	/* Record all the information about this allocation */
	ret_allocation->last_allocated = last_allocated;
	ret_allocation->info = info;

	return descriptor;
}