_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *odescriptor)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	int new_descriptor;
	mali_mem_allocation *descriptor;
	struct mali_session_data *session;

	MALI_DEBUG_ASSERT_POINTER(map);
	MALI_DEBUG_ASSERT_POINTER(odescriptor);
	MALI_DEBUG_ASSERT_POINTER(target);

	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
	new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
	if (new_descriptor == map->current_nr_mappings) {
		/* no free descriptor, try to expand the table */
		mali_descriptor_table *new_table, *old_table;
		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;

		map->current_nr_mappings += BITS_PER_LONG;
		new_table = descriptor_table_alloc(map->current_nr_mappings);
		if (NULL == new_table) goto unlock_and_exit;

		old_table = map->table;
		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
		_mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void *));
		map->table = new_table;
		descriptor_table_free(old_table);
	}

	/* we have found a valid descriptor, set the value and usage bit */
	_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
	map->table->mappings[new_descriptor] = target;
	*odescriptor = new_descriptor;

	/* To calculate the mali mem usage for the session */
	descriptor = (mali_mem_allocation *)target;
	session = descriptor->session;

	MALI_DEBUG_ASSERT_POINTER(session);

	session->mali_mem_array[descriptor->type] += descriptor->size;
	if ((MALI_MEM_OS == descriptor->type || MALI_MEM_BLOCK == descriptor->type) &&
	    (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated)) {
		session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
	}
	err = _MALI_OSK_ERR_OK;

unlock_and_exit:
	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
	MALI_ERROR(err);
}
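/*
 * Usage sketch (not part of the driver source): how a caller might map a
 * mali_mem_allocation and hand the resulting cookie back to user space.
 * Assumes the driver-internal descriptor-mapping declarations are in scope;
 * the matching release call is assumed to be mali_descriptor_mapping_free().
 */
static _mali_osk_errcode_t example_map_allocation(struct mali_session_data *session,
						  mali_mem_allocation *descriptor,
						  int *out_cookie)
{
	_mali_osk_errcode_t err;

	err = mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping,
						       (void *)descriptor, out_cookie);
	if (_MALI_OSK_ERR_OK != err) {
		/* Table was full and could not be expanded (or the expansion
		 * allocation failed); nothing was mapped. */
		return err;
	}

	/* *out_cookie now indexes map->table->mappings[]; release it again with
	 * mali_descriptor_mapping_free() (assumed) on any later error path. */
	return _MALI_OSK_ERR_OK;
}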
static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
{
	if (NULL != info) {
		/* 4096 for the page and 4 bytes for the address */
		const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
		const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
		const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;

		info->page_table_dump_size += dump_size_in_bytes;

		if (NULL != info->buffer) {
			if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);

			*info->buffer = phys_addr;
			info->buffer++;

			_mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
			info->buffer += page_size_in_elements;

			info->buffer_left -= dump_size_in_bytes;
		}
	}

	MALI_SUCCESS;
}
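/*
 * Usage sketch (not from the driver): mali_mmu_dump_page() supports a two-pass
 * pattern. With info->buffer == NULL it only accumulates page_table_dump_size;
 * with a buffer attached it emits one u32 physical address followed by the raw
 * 4 KiB page. Only the dump_info fields used above are touched; the OSK
 * memset/malloc helpers are assumed available.
 */
static _mali_osk_errcode_t example_dump_single_page(mali_io_address page, u32 phys_addr)
{
	struct dump_info info;
	u32 *buffer;
	_mali_osk_errcode_t err;

	_mali_osk_memset(&info, 0, sizeof(info));

	/* Pass 1: NULL buffer, only the required size is accumulated. */
	err = mali_mmu_dump_page(page, phys_addr, &info);
	if (_MALI_OSK_ERR_OK != err) return err;

	buffer = _mali_osk_malloc(info.page_table_dump_size);
	if (NULL == buffer) return _MALI_OSK_ERR_NOMEM;

	info.buffer = buffer;
	info.buffer_left = info.page_table_dump_size;

	/* Pass 2: writes phys_addr plus the page contents; note that the call
	 * advances info.buffer and accumulates page_table_dump_size again. */
	err = mali_mmu_dump_page(page, phys_addr, &info);

	/* ... hand 'buffer' to the consumer, then _mali_osk_free(buffer) ... */
	return err;
}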
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
{
	struct mali_gp_job *job;

	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL != job)
	{
		_mali_osk_list_init(&job->list);
		job->session = session;
		job->id = id;
		job->user_id = args->user_job_ptr;
		_mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
		job->heap_current_addr = args->frame_registers[4];
		job->perf_counter_flag = args->perf_counter_flag;
		job->perf_counter_src0 = args->perf_counter_src0;
		job->perf_counter_src1 = args->perf_counter_src1;
		job->perf_counter_value0 = 0;
		job->perf_counter_value1 = 0;

		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();
		job->frame_builder_id = args->frame_builder_id;
		job->flush_id = args->flush_id;

		return job;
	}

	return NULL;
}
int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target)
{
	int descriptor = -1; /* -EFAULT */
	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
	descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
	if (descriptor == map->current_nr_mappings) {
		int nr_mappings_new;
		/* no free descriptor, try to expand the table */
		ump_descriptor_table *new_table;
		ump_descriptor_table *old_table = map->table;
		nr_mappings_new = map->current_nr_mappings * 2;

		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) {
			descriptor = -1;
			goto unlock_and_exit;
		}

		new_table = descriptor_table_alloc(nr_mappings_new);
		if (NULL == new_table) {
			descriptor = -1;
			goto unlock_and_exit;
		}

		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
		_mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void *));
		map->table = new_table;
		map->current_nr_mappings = nr_mappings_new;
		descriptor_table_free(old_table);
	}

	/* we have found a valid descriptor, set the value and usage bit */
	_mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
	map->table->mappings[descriptor] = target;

unlock_and_exit:
	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
	return descriptor;
}
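/*
 * Usage sketch (not from the UMP source): allocate a secure-ID slot for an
 * object and report failure with the -1 convention used above. The paired
 * release call, ump_descriptor_mapping_free(), is the one used further down
 * in ump_dd_handle_create_from_phys_blocks().
 */
static int example_register_object(ump_descriptor_mapping *map, void *object)
{
	int id = ump_descriptor_mapping_allocate_mapping(map, object);

	if (id < 0) {
		/* Table full (and not expandable) or out of memory. */
		return -1;
	}

	/* ... publish id to the client; later: ump_descriptor_mapping_free(map, id); */
	return id;
}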
_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping *map, void *target, int *odescriptor)
{
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	int new_descriptor;

	MALI_DEBUG_ASSERT_POINTER(map);
	MALI_DEBUG_ASSERT_POINTER(odescriptor);

	_mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
	new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
	if (new_descriptor == map->current_nr_mappings)
	{
		/* no free descriptor, try to expand the table */
		mali_descriptor_table *new_table, *old_table;
		if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;

		map->current_nr_mappings += BITS_PER_LONG;
		new_table = descriptor_table_alloc(map->current_nr_mappings);
		if (NULL == new_table) goto unlock_and_exit;

		old_table = map->table;
		_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
		_mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
		map->table = new_table;
		descriptor_table_free(old_table);
	}

	/* we have found a valid descriptor, set the value and usage bit */
	_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
	map->table->mappings[new_descriptor] = target;
	*odescriptor = new_descriptor;
	err = _MALI_OSK_ERR_OK;

unlock_and_exit:
	_mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
	MALI_ERROR(err);
}
_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
{
	MALI_DEBUG_ASSERT_POINTER(data);

	if (NULL != mali_platform_device) {
		struct mali_gpu_device_data *os_data = NULL;

		os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
		if (NULL != os_data) {
			/* Copy data from OS dependent struct to Mali neutral struct (identical!) */
			BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
			_mali_osk_memcpy(data, os_data, sizeof(*os_data));

			return _MALI_OSK_ERR_OK;
		}
	}

	return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
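/*
 * Usage sketch (not from the driver): query the platform-provided device data.
 * The copy is byte-for-byte from struct mali_gpu_device_data, so callers only
 * need the Mali-neutral _mali_osk_device_data view.
 */
static void example_query_device_data(void)
{
	_mali_osk_device_data data;

	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
		/* Platform data is present; configuration such as the shared or
		 * dedicated memory setup can be read from 'data' here. */
	} else {
		/* _MALI_OSK_ERR_ITEM_NOT_FOUND: no platform device registered,
		 * or no platform_data attached to it. */
	}
}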
_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
{
	_mali_osk_errcode_t err;
	_mali_osk_notification_t *notification;
	_mali_osk_notification_queue_t *queue;

	/* check input */
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);

	queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);

	/* if the queue does not exist we're currently shutting down */
	if (NULL == queue)
	{
		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
		args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
		MALI_SUCCESS;
	}

	/* receive a notification, might sleep */
	err = _mali_osk_notification_queue_receive(queue, &notification);
	if (_MALI_OSK_ERR_OK != err)
	{
		MALI_ERROR(err); /* errcode returned, pass on to caller */
	}

	/* copy the buffer to the user */
	args->type = (_mali_uk_notification_type)notification->notification_type;
	_mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);

	/* finished with the notification */
	_mali_osk_notification_delete( notification );

	MALI_SUCCESS; /* all ok */
}
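/*
 * Usage sketch (hypothetical): a loop built directly on the UK entry point.
 * A real client reaches this through the driver's ioctl layer; the point here
 * is the loop shape: keep receiving until the core signals shutdown.
 */
static _mali_osk_errcode_t example_notification_loop(void *session_ctx)
{
	_mali_uk_wait_for_notification_s args;
	_mali_osk_errcode_t err;

	for (;;) {
		args.ctx = session_ctx;

		err = _mali_ukk_wait_for_notification(&args);
		if (_MALI_OSK_ERR_OK != err) return err;

		if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS == args.type) {
			break; /* session is shutting down; stop querying */
		}

		/* ... dispatch on args.type; the payload is in args.data ... */
	}

	return _MALI_OSK_ERR_OK;
}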
_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
{
	_mali_core_info *current_core;
	_mali_mem_info *current_mem;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	void *current_write_pos, **current_patch_pos;
	u32 adjust_ptr_base;

	/* check input */
	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
	MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);

	/* lock the system info */
	_mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );

	/* first check size */
	if (args->size < system_info_size) goto exit_when_locked;

	/* We build a copy of system_info in the user space buffer specified by the user and
	 * patch up the pointers. The ukk_private member of _mali_uk_get_system_info_s may
	 * indicate a different base address for patching the pointers (normally the
	 * address of the provided system_info buffer would be used). This is helpful when
	 * the system_info buffer needs to be copied to user space and the pointers need
	 * to be valid in user space.
	 */
	if (0 == args->ukk_private)
	{
		adjust_ptr_base = (u32)args->system_info;
	}
	else
	{
		adjust_ptr_base = args->ukk_private;
	}

	/* copy each struct into the buffer, and update its pointers */
	current_write_pos = (void *)args->system_info;

	/* first, the master struct */
	_mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));

	/* advance write pointer */
	current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));

	/* first we write the core info structs, patch starts at master's core_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));

	for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
	{

		/* patch the pointer pointing to this core */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the core info */
		_mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	/* then we write the mem info structs, patch starts at master's mem_info pointer */
	current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));

	for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
	{
		/* patch the pointer pointing to this mem info block */
		*current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));

		/* copy the mem info */
		_mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));

		/* update patch pos */
		current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));

		/* advance write pos in memory */
		current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
	}
	/* patching of last patch pos is not needed, since we wrote NULL there in the first place */

	err = _MALI_OSK_ERR_OK;
exit_when_locked:
	_mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
	MALI_ERROR(err);
}
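/*
 * Usage sketch (hypothetical): a kernel-side caller that lets the pointers be
 * patched relative to its own buffer (ukk_private == 0) and then walks the
 * embedded core list. buf_size is assumed to come from the driver's separate
 * system-info size query; error handling is trimmed.
 */
static void example_walk_system_info(void *session_ctx, u32 buf_size)
{
	_mali_uk_get_system_info_s args;
	_mali_system_info *info;
	_mali_core_info *core;

	info = _mali_osk_malloc(buf_size);
	if (NULL == info) return;

	args.ctx = session_ctx;
	args.system_info = info;
	args.size = buf_size;
	args.ukk_private = 0; /* patch pointers relative to the buffer itself */

	if (_MALI_OSK_ERR_OK == _mali_ukk_get_system_info(&args)) {
		for (core = info->core_info; NULL != core; core = core->next) {
			/* each _mali_core_info record was copied inline and its
			 * 'next' pointer patched to point inside this buffer */
		}
	}

	_mali_osk_free(info);
}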
UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
{
	ump_dd_mem * mem;
	unsigned long size_total = 0;
	int map_id;
	u32 i;

	/* Go through the input blocks and verify that they are sane */
	for (i=0; i < num_blocks; i++)
	{
		unsigned long addr = blocks[i].addr;
		unsigned long size = blocks[i].size;

		DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
		size_total += blocks[i].size;

		if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
		{
			MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
			return UMP_DD_HANDLE_INVALID;
		}

		if (0 != UMP_ADDR_ALIGN_OFFSET(size))
		{
			MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
			return UMP_DD_HANDLE_INVALID;
		}
	}

	/* Allocate the ump_dd_mem struct for this allocation */
	mem = _mali_osk_malloc(sizeof(*mem));
	if (NULL == mem)
	{
		DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Find a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);

	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	/* Now, make a copy of the block information supplied by the user */
	mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
	if (NULL == mem->block_array)
	{
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(mem);
		DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
		return UMP_DD_HANDLE_INVALID;
	}

	_mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);

	/* And setup the rest of the ump_dd_mem struct */
	_mali_osk_atomic_init(&mem->ref_count, 1);
	mem->secure_id = (ump_secure_id)map_id;
	mem->size_bytes = size_total;
	mem->nr_blocks = num_blocks;
	mem->backend_info = NULL;
	mem->ctx = NULL;
	mem->release_func = phys_blocks_release;
	/* For now, UMP handles created by ump_dd_handle_create_from_phys_blocks() are forced to be uncached */
	mem->is_cached = 0;
	mem->hw_device = _UMP_UK_USED_BY_CPU;
	mem->lock_usage = UMP_NOT_LOCKED;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));

	return (ump_dd_handle)mem;
}
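/*
 * Usage sketch (not from the UMP source): export one physically contiguous,
 * UMP-aligned region and return its secure ID. ump_dd_secure_id_get() and
 * UMP_INVALID_SECURE_ID are assumed from the public UMP kernel API; the handle
 * keeps its initial reference until the exporter calls ump_dd_reference_release().
 */
static ump_secure_id example_export_block(unsigned long phys_addr, unsigned long size)
{
	ump_dd_physical_block block;
	ump_dd_handle handle;

	block.addr = phys_addr; /* must pass the UMP_ADDR_ALIGN_OFFSET checks above */
	block.size = size;

	handle = ump_dd_handle_create_from_phys_blocks(&block, 1);
	if (UMP_DD_HANDLE_INVALID == handle) {
		return UMP_INVALID_SECURE_ID;
	}

	return ump_dd_secure_id_get(handle);
}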
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
				       _mali_uk_pp_start_job_s __user *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job) {
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
			goto fail;
		}

		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		if (!mali_pp_job_use_no_notification(job)) {
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* If no counters were specified from user space,
		 * pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);

			/* These counters apply for all virtual jobs, and where no per sub job counter is specified */
			job->uargs.perf_counter_src0 = pp_counter_src0;
			job->uargs.perf_counter_src1 = pp_counter_src1;

			/* We only copy the per sub job array if it is enabled with at least one counter */
			if (0 < sub_job_count) {
				job->perf_counter_per_sub_job_count = sub_job_count;
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
			}
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;

		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0) {
			u32 size;
			u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;

			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		}

		/* Prepare DMA command buffer to start job, if it is virtual. */
		if (mali_pp_job_is_virtual_group_job(job)) {
			struct mali_pp_core *core;
			_mali_osk_errcode_t err =  mali_dma_get_cmd_buf(&job->dma_cmd_buf);

			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
				goto fail;
			}

			core = mali_pp_scheduler_get_virtual_pp();
			MALI_DEBUG_ASSERT_POINTER(core);

			mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
		}

		if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
			/* Not a valid job. */
			goto fail;
		}

		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

fail:
	if (NULL != job) {
		mali_pp_job_delete(job);
	}

	return NULL;
}