_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
{
	ump_session_data * session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element * session_memory_element = NULL;
	int map_id;

	DEBUG_ASSERT_POINTER( user_interaction );
	DEBUG_ASSERT_POINTER( user_interaction->ctx );

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element)
	{
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
	if (NULL == new_allocation)
	{
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Create a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);

	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	/* Initialize the part of the new_allocation that we know so far */
	new_allocation->secure_id = (ump_secure_id)map_id;
	_mali_osk_atomic_init(&new_allocation->ref_count,1);
	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
	{
		new_allocation->is_cached = 0;
	}
	else
	{
		new_allocation->is_cached = 1;
	}

	/* Special case a size of 0: emulate what malloc() does here, which is to
	 * return a valid pointer that must be freed, but cannot be dereferenced */
	if (0 == user_interaction->size)
	{
		user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
	}

	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
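	/* e.g. assuming a 4 KiB UMP page size, the 1-byte minimum request above
	 * rounds up to a 4096-byte allocation */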
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
	{
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;
	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
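/*
 * Usage sketch for _ump_ukk_allocate() (hypothetical caller, e.g. an ioctl
 * handler): fill in the request, point ctx at the session, and read the
 * secure ID and page-aligned size back out of the struct. The session
 * pointer is assumed to come from the normal session setup path; a
 * constraints value of 0 requests an uncached allocation (no
 * UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE bit set).
 */
static _mali_osk_errcode_t example_session_allocate(ump_session_data *session_data, u32 size, ump_secure_id *id_out)
{
	_ump_uk_allocate_s request;
	_mali_osk_errcode_t err;

	request.ctx = (void *)session_data;
	request.size = size;
	request.constraints = 0; /* uncached */

	err = _ump_ukk_allocate(&request);
	if (_MALI_OSK_ERR_OK != err) return err;

	*id_out = request.secure_id; /* request.size now holds the aligned size */
	return _MALI_OSK_ERR_OK;
}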
UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
{
	ump_dd_mem * mem;
	unsigned long size_total = 0;
	int map_id;
	u32 i;

	/* Go through the input blocks and verify that they are sane */
	for (i=0; i < num_blocks; i++)
	{
		unsigned long addr = blocks[i].addr;
		unsigned long size = blocks[i].size;

		DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
		size_total += blocks[i].size;

		if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
		{
			MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
			return UMP_DD_HANDLE_INVALID;
		}

		if (0 != UMP_ADDR_ALIGN_OFFSET(size))
		{
			MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
			return UMP_DD_HANDLE_INVALID;
		}
	}

	/* Allocate the ump_dd_mem struct for this allocation */
	mem = _mali_osk_malloc(sizeof(*mem));
	if (NULL == mem)
	{
		DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Find a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);

	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Now, make a copy of the block information supplied by the user */
	mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
	if (NULL == mem->block_array)
	{
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	_mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);

	/* And setup the rest of the ump_dd_mem struct */
	_mali_osk_atomic_init(&mem->ref_count, 1);
	mem->secure_id = (ump_secure_id)map_id;
	mem->size_bytes = size_total;
	mem->nr_blocks = num_blocks;
	mem->backend_info = NULL;
	mem->ctx = NULL;
	mem->release_func = phys_blocks_release;
	/* For now, UMP handles created by ump_dd_handle_create_from_phys_blocks() are forced to be uncached */
	mem->is_cached = 0;
	mem->hw_device = _UMP_UK_USED_BY_CPU;
	mem->lock_usage = UMP_NOT_LOCKED;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));

	return (ump_dd_handle)mem;
}
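/*
 * Usage sketch for ump_dd_handle_create_from_phys_blocks() (hypothetical
 * caller): wrap a single physically contiguous region in a UMP handle. Both
 * the address and the size must be UMP page aligned, as verified above; the
 * returned handle starts with a reference count of 1 and is released through
 * the reference/release machinery, which is outside this snippet.
 */
static ump_dd_handle example_wrap_physical_region(unsigned long phys_addr, unsigned long size)
{
	ump_dd_physical_block block;

	block.addr = phys_addr; /* must satisfy 0 == UMP_ADDR_ALIGN_OFFSET(addr) */
	block.size = size;      /* must satisfy 0 == UMP_ADDR_ALIGN_OFFSET(size) */

	return ump_dd_handle_create_from_phys_blocks(&block, 1);
}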
MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
{
	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
	_mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
}
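/*
 * The matching acquire side is assumed to look like this (sketch): the GP
 * scheduler lock is taken with _mali_osk_lock_wait and released by
 * mali_gp_scheduler_unlock() above.
 */
MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
{
	MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Acquiring GP scheduler lock\n"));
	_mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
}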
mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	unsigned long rate = 0;
	mali_bool bis_vpll = MALI_FALSE;

	/* Use VPLL as the Mali clock source unless it is reserved for the TV encoder */
#ifndef CONFIG_VPLL_USE_FOR_TVENC
	bis_vpll = MALI_TRUE;
#endif

	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	if (mali_clk_get(bis_vpll) == MALI_FALSE)
	{
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

	rate = (unsigned long)clk * (unsigned long)mhz;
	MALI_DEBUG_PRINT(3, ("= clk_set_rate: %d, %d\n", clk, mhz));

	if (bis_vpll)
	{
		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
		clk_set_parent(vpll_src_clock, ext_xtal_clock);
		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}
	else
	{
		clk_set_parent(mali_parent_clock, mpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}

	if (clk_enable(mali_clock) < 0)
	{
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
                               rate, 0, 0, 0, 0);
#endif

	clk_set_rate(mali_clock, rate);
	rate = clk_get_rate(mali_clock);

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
                               rate, 1, 0, 0, 0);
#endif

	if (bis_vpll)
		mali_gpu_clk = (int)(rate / mhz);
	else
		mali_gpu_clk = (int)((rate + 500000) / mhz);
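	/* Worked example of the MPLL-path rounding above: with mhz == 1000000
	 * and a read-back rate of 159500000 Hz,
	 * (159500000 + 500000) / 1000000 == 160, i.e. rounding to the nearest
	 * MHz rather than truncating. */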

	GPU_MHZ = mhz;
	MALI_DEBUG_PRINT(3, ("= clk_get_rate: %d\n", mali_gpu_clk));

	mali_clk_put(MALI_FALSE);

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	return MALI_TRUE;
}
mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	unsigned long rate = 0;
	mali_bool bis_vpll = MALI_FALSE;

#ifndef CONFIG_VPLL_USE_FOR_TVENC
	bis_vpll = MALI_TRUE;
#endif

#ifndef CONFIG_MALI_DVFS
	clk = mali_gpu_clk;
#endif

	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	if (mali_clk_get(bis_vpll) == MALI_FALSE)
	{
		printk(KERN_ERR "%s: mali_clk_get failed (line %d)\n", __func__, __LINE__);
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

	rate = (unsigned long)clk * (unsigned long)mhz;
	MALI_DEBUG_PRINT(3, ("= clk_set_rate: %d, %d\n", clk, mhz));

	if (bis_vpll)
	{
		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
		//clk_set_parent(vpll_src_clock, ext_xtal_clock);
		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}
	else
	{
		clk_set_parent(mali_parent_clock, mpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}

	if (clk_enable(mali_clock) < 0)
	{
		printk(KERN_ERR "%s: clk_enable failed (line %d)\n", __func__, __LINE__);
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

	clk_set_rate(mali_clock, rate);
	rate = clk_get_rate(mali_clock);

	if (bis_vpll)
		mali_gpu_clk = (int)(rate / mhz);
	else
		mali_gpu_clk = (int)((rate + 500000) / mhz);

	GPU_MHZ = mhz;
	MALI_DEBUG_PRINT(3, ("= clk_get_rate: %d\n", mali_gpu_clk));

	mali_clk_put(MALI_FALSE);

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	return MALI_TRUE;
}
void mali_group_unlock(struct mali_group *group)
{
	MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
	_mali_osk_lock_signal(group->lock, _MALI_OSK_LOCKMODE_RW);
}
void mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	int err;
	unsigned long rate = (unsigned long)clk * (unsigned long)mhz;
	unsigned int read_val;

	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
	MALI_DEBUG_PRINT(3, ("Mali platform: Setting frequency to %d mhz\n", clk));

	if (mali_clk_get() == MALI_FALSE) {
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return;
	}

	clk_set_parent(mali_parent_clock, mout_epll_clock);

	do {
		cpu_relax();
		read_val = __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0);
	} while (((read_val >> 4) & 0x7) != 0x1);
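	/* Bits [6:4] of CLKMUX_STAT_G3D0 report the currently selected mux
	 * input; 0x1 is assumed to mean the EPLL source here (the loop further
	 * down waits for 0x2, the VPLL source). */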
	MALI_DEBUG_PRINT(3, ("Mali platform: set to EPLL EXYNOS4_CLKMUX_STAT_G3D0 : 0x%08x\n", __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0)));

	err = clk_set_parent(sclk_vpll_clock, ext_xtal_clock);

	if (err)
		MALI_PRINT_ERROR(("sclk_vpll set parent to ext_xtal failed\n"));

	MALI_DEBUG_PRINT(3, ("Mali platform: set_parent_vpll : %08x\n", (__raw_readl(EXYNOS4_CLKSRC_TOP0) >> 8) & 0x1));

	clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
	clk_set_parent(vpll_src_clock, ext_xtal_clock);

	err = clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

	if (err)
		MALI_PRINT_ERROR(("sclk_vpll set parent to fout_vpll failed\n"));

	MALI_DEBUG_PRINT(3, ("Mali platform: set_parent_vpll : %08x\n", (__raw_readl(EXYNOS4_CLKSRC_TOP0) >> 8) & 0x1));

	clk_set_parent(mali_parent_clock, sclk_vpll_clock);

	do {
		cpu_relax();
		read_val = __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0);
	} while (((read_val >> 4) & 0x7) != 0x2);

	MALI_DEBUG_PRINT(3, ("SET to VPLL EXYNOS4_CLKMUX_STAT_G3D0 : 0x%08x\n", __raw_readl(EXYNOS4_CLKMUX_STAT_G3D0)));

	clk_set_parent(mali_clock, mali_parent_clock);

	if (!atomic_read(&clk_active)) {
		if (clk_enable(mali_clock) < 0) {
			_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
			return;
		}
		atomic_set(&clk_active, 1);
	}
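	/* clk_active keeps clk_enable()/clk_disable() balanced: the clock is
	 * only enabled on the first pass through here, and the flag is
	 * presumably cleared by a matching clk_disable() path elsewhere. */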

	err = clk_set_rate(mali_clock, rate);

	if (err)
		MALI_PRINT_ERROR(("Failed to set Mali clock: %d\n", err));

	rate = clk_get_rate(mali_clock);

	MALI_DEBUG_PRINT(1, ("Mali frequency %lu\n", rate / mhz));
	GPU_MHZ = mhz;

	mali_gpu_clk = clk;
	mali_clk_put(MALI_FALSE);

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
}
void mali_regulator_set_voltage(int min_uV, int max_uV)
{
	int voltage;
#if !MALI_DVFS_ENABLED
	min_uV = mali_gpu_vol;
	max_uV = mali_gpu_vol;
#endif
#if MALI_VOLTAGE_LOCK
	if (mali_vol_lock_flag == MALI_FALSE) {
		if (min_uV < MALI_BOTTOMLOCK_VOL || max_uV < MALI_BOTTOMLOCK_VOL) {
			min_uV = MALI_BOTTOMLOCK_VOL;
			max_uV = MALI_BOTTOMLOCK_VOL;
		}
	} else if (_mali_osk_atomic_read(&voltage_lock_status) > 0 ) {
		if (min_uV < mali_lock_vol || max_uV < mali_lock_vol) {
#if MALI_DVFS_ENABLED
			int mali_vol_get;
			mali_vol_get = mali_vol_get_from_table(mali_lock_vol);
			if (mali_vol_get) {
				min_uV = mali_vol_get;
				max_uV = mali_vol_get;
			}
#else
			min_uV = mali_lock_vol;
			max_uV = mali_lock_vol;
#endif
		}
	}
#endif
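	/* Worked example: if MALI_BOTTOMLOCK_VOL were 900000 uV (value assumed),
	 * an unlocked request of min_uV = max_uV = 850000 would be raised to
	 * 900000/900000 above before reaching the regulator. */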

	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	if (IS_ERR_OR_NULL(g3d_regulator))
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return;
	}

	MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d\n", min_uV, max_uV));

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
                               min_uV, max_uV, 1, 0, 0);
#endif

	regulator_set_voltage(g3d_regulator, min_uV, max_uV);
	voltage = regulator_get_voltage(g3d_regulator);

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
                               voltage, 0, 2, 0, 0);
#endif

	mali_gpu_vol = voltage;
	MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d\n", mali_gpu_vol));

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
}
_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
{
	_mali_core_info * current_core;
	_mali_mem_info * current_mem;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	void * current_write_pos, ** current_patch_pos;
	u32 adjust_ptr_base;

	/* check input */
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
	MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);

	/* lock the system info */
	_mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* first check size */
	if (args->size < system_info_size) goto exit_when_locked;

	/* We build a copy of system_info in the user space buffer specified by
	 * the user and patch up the pointers. The ukk_private member of
	 * _mali_uk_get_system_info_s may indicate a different base address for
	 * patching the pointers (normally the address of the provided
	 * system_info buffer would be used). This is helpful when the
	 * system_info buffer needs to get copied to user space and the pointers
	 * need to be in user space.
	 */
	if (0 == args->ukk_private)
	{
		adjust_ptr_base = (u32)args->system_info;
	}
	else
	{
		adjust_ptr_base = args->ukk_private;
	}
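	/* Sketch of the patching arithmetic: a struct written at offset N into
	 * args->system_info gets its incoming pointer rewritten to
	 * adjust_ptr_base + N, so the links stay valid relative to whichever
	 * base address the caller asked for. */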

	/* copy each struct into the buffer, and update its pointers */
	current_write_pos = (void *)args->system_info;

	/* first, the master struct */
	_mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));

	/* advance write pointer */
	current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));

	/* first we write the core info structs, patch starts at master's core_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));

	for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
	{

		/* patch the pointer pointing to this core */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the core info */
		_mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	/* then we write the mem info structs, patch starts at master's mem_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));

	for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
	{
		/* patch the pointer pointing to this core */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the core info */
		_mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	err = _MALI_OSK_ERR_OK;
exit_when_locked:
	_mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
	MALI_ERROR(err);
}
static _mali_osk_errcode_t build_system_info(void)
{
	unsigned int i;
	int err = _MALI_OSK_ERR_FAULT;
	_mali_system_info * new_info, * cleanup;
	_mali_core_info * current_core;
	_mali_mem_info * current_mem;
	u32 new_size = 0;

	/* create a new system info struct */
	MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);

	_mali_osk_memset(new_info, 0, sizeof(_mali_system_info));

	/* if an error happens during any of the system_info_fill calls, clean up the new info structs */
	cleanup = new_info;

	/* ask each subsystem to fill in its info */
	for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
	{
		if (NULL != subsystems[i]->system_info_fill)
		{
			err = subsystems[i]->system_info_fill(new_info);
			if (_MALI_OSK_ERR_OK != err) goto error_exit;
		}
	}

	/* building succeeded, calculate the size */

	/* size needed of the system info struct itself */
	new_size = sizeof(_mali_system_info);

	/* size needed for the cores */
	for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
	{
		new_size += sizeof(_mali_core_info);
	}

	/* size needed for the memory banks */
	for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
	{
		new_size += sizeof(_mali_mem_info);
	}

	/* lock system info access so a user won't get a corrupted version */
	_mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* cleanup the old one */
	cleanup = system_info;
	/* set new info */
	system_info = new_info;
	system_info_size = new_size;

	/* we're safe */
	_mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* ok result */
	err = _MALI_OSK_ERR_OK;

	/* we share the cleanup routine with the error case */
error_exit:
	if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */

	/* cleanup */
	cleanup_system_info(cleanup);

	/* return whatever err is, we could end up here in both the error and success cases */
	MALI_ERROR((_mali_osk_errcode_t)err);
}
static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
{
    block_allocator * info;
    u32 left;
    block_info * last_allocated = NULL;
    mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
    block_allocator_allocation *ret_allocation;

    MALI_DEBUG_ASSERT_POINTER(ctx);
    MALI_DEBUG_ASSERT_POINTER(descriptor);
    MALI_DEBUG_ASSERT_POINTER(offset);
    MALI_DEBUG_ASSERT_POINTER(alloc_info);

    info = (block_allocator*)ctx;
    left = descriptor->size - *offset;
    MALI_DEBUG_ASSERT(0 != left);

    if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;

    ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );

    if ( NULL == ret_allocation )
    {
        /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
        _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
        return result;
    }

    ret_allocation->start_offset = *offset;
    ret_allocation->mapping_length = 0;

    while ((left > 0) && (info->first_free))
    {
        block_info * block;
        u32 phys_addr;
        u32 padding;
        u32 current_mapping_size;

        block = info->first_free;
        info->first_free = info->first_free->next;
        block->next = last_allocated;
        last_allocated = block;

        phys_addr = get_phys(info, block);

        padding = *offset & (MALI_BLOCK_SIZE-1);

        if (MALI_BLOCK_SIZE - padding < left)
        {
            current_mapping_size = MALI_BLOCK_SIZE - padding;
        }
        else
        {
            current_mapping_size = left;
        }
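        /* Worked example: if MALI_BLOCK_SIZE were 0x100000 (value assumed)
         * and *offset == 0x180000, then padding == 0x80000 and at most
         * MALI_BLOCK_SIZE - padding == 0x80000 bytes of this block can be
         * mapped before the next block boundary. */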

        if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
        {
            MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
            result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
            mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);

            /* release all memory back to the pool */
            while (last_allocated)
            {
                /* This relinks every block we've just allocated back into the free-list */
                block = last_allocated->next;
                last_allocated->next = info->first_free;
                info->first_free = last_allocated;
                last_allocated = block;
            }

            break;
        }

        *offset += current_mapping_size;
        left -= current_mapping_size;
        ret_allocation->mapping_length += current_mapping_size;
    }

    _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);

    if (last_allocated)
    {
        if (left) result = MALI_MEM_ALLOC_PARTIAL;
        else result = MALI_MEM_ALLOC_FINISHED;

        /* Record all the information about this allocation */
        ret_allocation->last_allocated = last_allocated;
        ret_allocation->engine = engine;
        ret_allocation->descriptor = descriptor;

        alloc_info->ctx = info;
        alloc_info->handle = ret_allocation;
        alloc_info->release = block_allocator_release;
    }
    else
    {
        /* Free the allocation information - nothing to be passed back */
        _mali_osk_free( ret_allocation );
    }

    return result;
}
_mali_osk_errcode_t _mali_ukk_open(void **context)
{
	int i;
	_mali_osk_errcode_t err;
#ifdef MALI_SESSION_MEMORY_USAGE
	struct mali_session_data_list * session_data_list;
#endif
	struct mali_session_data * session_data;

	/* allocated struct to track this session */
	session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);

#ifdef MALI_SESSION_MEMORY_USAGE
	session_data_list = (struct mali_session_data_list *)_mali_osk_malloc(sizeof(struct mali_session_data_list));
	if (session_data_list == NULL)
	{
		_mali_osk_free(session_data);
		return _MALI_OSK_ERR_NOMEM;
	}

	_MALI_OSK_INIT_LIST_HEAD(&session_data_list->list_head);
	session_data_list->pid = _mali_osk_get_pid();
	session_data_list->session_data = session_data;
	session_data->list = session_data_list;
#endif

	_mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));

	/* create a response queue for this session */
	session_data->ioctl_queue = _mali_osk_notification_queue_init();
	if (NULL == session_data->ioctl_queue)
	{
		_mali_osk_free(session_data);
#ifdef MALI_SESSION_MEMORY_USAGE
		_mali_osk_free(session_data_list);
#endif
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	MALI_DEBUG_PRINT(3, ("Session starting\n"));

	/* call session_begin on all subsystems */
	for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
	{
		if (NULL != subsystems[i]->session_begin)
		{
			/* subsystem has a session_begin */
			err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
			MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
		}
	}

	*context = (void *)session_data;

#ifdef MALI_SESSION_MEMORY_USAGE
	_mali_osk_lock_wait( session_data_lock, _MALI_OSK_LOCKMODE_RW );
	_mali_osk_list_addtail(&session_data_list->list_head, &session_data_head);
	_mali_osk_lock_signal( session_data_lock, _MALI_OSK_LOCKMODE_RW );
#endif

	MALI_DEBUG_PRINT(3, ("Session started\n"));
	MALI_SUCCESS;

cleanup:
	MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
	/* i is the index of the subsystem which failed session begin; all indices before it have to be ended */
	/* end subsystem sessions in the reverse order they were started in */
	for (i = i - 1; i >= 0; --i)
	{
		if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
	}

	_mali_osk_notification_queue_term(session_data->ioctl_queue);
	_mali_osk_free(session_data);
#ifdef MALI_SESSION_MEMORY_USAGE
	_mali_osk_free(session_data_list);
#endif

	/* return what the subsystem which failed session start returned */
	MALI_ERROR(err);
}
void _mali_session_data_unlock(void)
{
	_mali_osk_lock_signal( session_data_lock, _MALI_OSK_LOCKMODE_RW );
}