Example #1
struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
{
	struct mali_soft_job_system *system;

	MALI_DEBUG_ASSERT_POINTER(session);

	system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
	if (NULL == system) {
		return NULL;
	}

	system->session = session;

	system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == system->lock) {
		mali_soft_job_system_destroy(system);
		return NULL;
	}
	system->lock_owner = 0;
	system->last_job_id = 0;

	_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));

	return system;
}
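A minimal usage sketch, not taken from the original driver source: it assumes a fully initialized mali_session_data and pairs the constructor with mali_soft_job_system_destroy(), which the function above already relies on for cleanup of a partially constructed system.
/* Hedged sketch: create and destroy a soft job system for a session.
 * `session` is assumed to be a valid, initialized mali_session_data. */
static _mali_osk_errcode_t example_soft_job_roundtrip(struct mali_session_data *session)
{
	struct mali_soft_job_system *system;

	system = mali_soft_job_system_create(session);
	if (NULL == system) {
		return _MALI_OSK_ERR_NOMEM; /* calloc or spinlock init failed */
	}

	/* ... start soft jobs against `system` here ... */

	mali_soft_job_system_destroy(system); /* frees the spinlock and the object */
	return _MALI_OSK_ERR_OK;
}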
static int _mali_gp_add_varying_allocations(struct mali_session_data *session,
		struct mali_gp_job *job,
		u32 *alloc,
		u32 num)
{
	int i = 0;
	struct mali_gp_allocation_node *alloc_node, *tmp_node;
	mali_mem_allocation *mali_alloc = NULL;
	struct mali_vma_node *mali_vma_node = NULL;

	for (i = 0 ; i < num ; i++) {
		MALI_DEBUG_ASSERT(alloc[i]);
		alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node));
		if (alloc_node) {
			INIT_LIST_HEAD(&alloc_node->node);
			/* find mali allocation structure by vaddress */
			mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0);

			if (likely(mali_vma_node)) {
				mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
				MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
			} else {
				MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,can't find allocation %d by address =0x%x, num=%d\n", i, alloc[i], num));
				MALI_DEBUG_ASSERT(0);
			}
			alloc_node->alloc = mali_alloc;
			/* add to gp job varying alloc list */
			list_move(&alloc_node->node, &job->varying_alloc);
		} else {
			goto fail;
		}
	}

	return 0;

fail:
	/* Out of memory for a list node: unwind the nodes already added to the job. */
	MALI_DEBUG_PRINT(1, ("ERROR: _mali_gp_add_varying_allocations: _mali_osk_calloc failed\n"));
	list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
		list_del(&alloc_node->node);
		_mali_osk_free(alloc_node);
	}
	return -1;
}
mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
{
	mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));

	init_entries = MALI_PAD_INT(init_entries);
	max_entries = MALI_PAD_INT(max_entries);

	if (NULL != map)
	{
		map->table = descriptor_table_alloc(init_entries);
		if (NULL != map->table)
		{
#if !USING_MMU
			map->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 20);
#else
			map->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 116);
#endif
			if (NULL != map->lock)
			{
				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
				map->max_nr_mappings_allowed = max_entries;
				map->current_nr_mappings = init_entries;
				return map;
			}
			descriptor_table_free(map->table);
		}
		_mali_osk_free(map);
	}
	return NULL;
}
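A hedged usage sketch for the mapping above, modeled on the UMP-side call used later on this page (ump_descriptor_mapping_allocate_mapping at Example #17); the exact Mali-side signature is an assumption here, and `object` stands in for any kernel pointer.
/* Illustrative only: map a kernel object to a small integer descriptor.
 * mali_descriptor_mapping_allocate_mapping() is assumed to return the new
 * descriptor, or a negative value on failure, like the UMP variant. */
static int example_map_object(void *object)
{
	mali_descriptor_mapping *map;
	int descriptor;

	map = mali_descriptor_mapping_create(64, 4096); /* start small, allow growth to 4096 */
	if (NULL == map) {
		return -1;
	}

	descriptor = mali_descriptor_mapping_allocate_mapping(map, object);
	if (descriptor < 0) {
		/* table full or internal allocation failed */
	}

	return descriptor; /* can be handed to user space instead of a pointer */
}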
Example #4
struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
{
	u32 i;
	struct mali_soft_job_system *system;
	struct mali_soft_job *job;

	MALI_DEBUG_ASSERT_POINTER(session);

	system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
	if (NULL == system) {
		return NULL;
	}

	system->session = session;

	system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == system->lock) {
		mali_soft_job_system_destroy(system);
		return NULL;
	}
	system->lock_owner = 0;

	_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
	_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));

	for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
		job = &(system->jobs[i]);
		_mali_osk_list_add(&(job->system_list), &(system->jobs_free));
		job->system = system;
		job->state = MALI_SOFT_JOB_STATE_FREE;
		job->id = i;
	}

	return system;
}
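Unlike Example #1, this variant preallocates a fixed pool of MALI_MAX_NUM_SOFT_JOBS jobs and threads them onto jobs_free, so later job allocation never touches the allocator. One consequence, shown as a hypothetical sketch (not from the original source), is that a job id doubles as an index into the pool:
/* Hedged sketch: O(1) lookup by job id, assuming the id assigned in the
 * loop above (job->id = i) is never remapped. */
static struct mali_soft_job *soft_job_from_id(struct mali_soft_job_system *system, u32 job_id)
{
	MALI_DEBUG_ASSERT_POINTER(system);

	if (job_id >= MALI_MAX_NUM_SOFT_JOBS) {
		return NULL; /* id outside the fixed pool */
	}

	return &(system->jobs[job_id]);
}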
Example #5
mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
{
	mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));

	init_entries = MALI_PAD_INT(init_entries);
	max_entries = MALI_PAD_INT(max_entries);

	if (NULL != map)
	{
		map->table = descriptor_table_alloc(init_entries);
		if (NULL != map->table)
		{
			map->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
			if (NULL != map->lock)
			{
				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
				map->max_nr_mappings_allowed = max_entries;
				map->current_nr_mappings = init_entries;
				return map;
			}
			descriptor_table_free(map->table);
		}
		_mali_osk_free(map);
	}
	return NULL;
}
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
{
	struct mali_mmu_core* mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
	if (NULL != mmu)
	{
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
		{
			if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu))
			{
				if (is_virtual)
				{
					/* Skip reset and IRQ setup for virtual MMU */
					return mmu;
				}

				if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
				{
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					mmu->irq = _mali_osk_irq_init(resource->irq,
					                              mali_group_upper_half_mmu,
					                              group,
					                              mali_mmu_probe_trigger,
					                              mali_mmu_probe_ack,
					                              mmu,
					                              "mali_mmu_irq_handlers");
					if (NULL != mmu->irq)
					{
						return mmu;
					}
					else
					{
						MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
					}
				}
				mali_group_remove_mmu_core(group);
			}
			else
			{
				MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
			}
			mali_hw_core_delete(&mmu->hw_core);
		}

		_mali_osk_free(mmu);
	}
	else
	{
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}
static ump_descriptor_table *descriptor_table_alloc(int count)
{
	ump_descriptor_table *table;

	table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));

	if (NULL != table) {
		table->usage = (u32 *)((u8 *)table + sizeof(ump_descriptor_table));
		table->mappings = (void **)((u8 *)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
	}

	return table;
}
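descriptor_table_alloc() carves three regions out of a single calloc: the ump_descriptor_table header, then a usage bitmap of count bits ((sizeof(unsigned long) * count) / BITS_PER_LONG bytes, exact because callers pad count with MALI_PAD_INT), then an array of count object pointers. A hedged sketch of how a descriptor resolves against that layout; the lookup helper is illustrative, not from the original source.
/* Illustrative only: bit d in table->usage marks slot d as live, and
 * table->mappings[d] holds the mapped object. test_bit() is the standard
 * Linux bitmap accessor. */
static void *descriptor_table_lookup(ump_descriptor_table *table, int d)
{
	if (!test_bit(d, (unsigned long *)table->usage)) {
		return NULL; /* descriptor not currently allocated */
	}

	return table->mappings[d];
}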
struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource)
{
	struct mali_mmu_core* mmu = NULL;

	MALI_DEBUG_ASSERT_POINTER(resource);

	MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));

	mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
	if (NULL != mmu)
	{
#ifdef CONFIG_ARCH_MESON6
		mmu->id = resource->mmu_id;
		MALI_DEBUG_PRINT(3, ("Mali MMU: mmu_id: %d\n", resource->mmu_id));
#endif
		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
		{
			if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
			{
				/* Setup IRQ handlers (which will do IRQ probing if needed) */
				mmu->irq = _mali_osk_irq_init(resource->irq,
							      mali_mmu_upper_half,
							      mali_mmu_bottom_half,
							      mali_mmu_probe_trigger,
							      mali_mmu_probe_ack,
							      mmu,
							      "mali_mmu_irq_handlers");
				if (NULL != mmu->irq)
				{
					return mmu;
				}
				else
				{
					MALI_PRINT_ERROR(("Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
				}
			}
			mali_hw_core_delete(&mmu->hw_core);
		}

		_mali_osk_free(mmu);
	}
	else
	{
		MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
	}

	return NULL;
}
/**
 * Creates a sync fence tracker and a sync fence.  Adds sync fence tracker to Timeline system and
 * returns sync fence.  The sync fence will be signaled when the sync fence tracker is activated.
 *
 * @param timeline Timeline.
 * @param point Point on timeline.
 * @return Sync fence that will be signaled when tracker is activated.
 */
static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
{
	struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
	struct sync_fence                       *sync_fence;
	struct mali_timeline_fence               fence;

	MALI_DEBUG_ASSERT_POINTER(timeline);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);

	/* Allocate sync fence tracker. */
	sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
	if (NULL == sync_fence_tracker) {
		MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
		return NULL;
	}

	/* Create sync flag. */
	MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
	sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
	if (NULL == sync_fence_tracker->flag) {
		MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
		_mali_osk_free(sync_fence_tracker);
		return NULL;
	}

	/* Create sync fence from sync flag. */
	sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
	if (NULL == sync_fence) {
		MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
		mali_sync_flag_put(sync_fence_tracker->flag);
		_mali_osk_free(sync_fence_tracker);
		return NULL;
	}

	/* Setup fence for tracker. */
	_mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
	fence.sync_fd = -1;
	fence.points[timeline->id] = point;

	/* Finally, add the tracker to Timeline system. */
	mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
	point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);

	return sync_fence;
}
struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
{
	struct mali_spinlock_reentrant *spinlock;

	spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
	if (NULL == spinlock) {
		return NULL;
	}

	spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
	if (NULL == spinlock->lock) {
		mali_spinlock_reentrant_term(spinlock);
		return NULL;
	}

	return spinlock;
}
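The reentrant spinlock wraps an IRQ-safe spinlock with an owner thread id and a nesting counter, so the same thread can take it recursively. A hedged sketch of the matching wait/signal pair follows; the `owner` and `counter` fields are assumed members of struct mali_spinlock_reentrant, since only `lock` appears in the constructor above.
/* Hedged sketch of recursive acquire/release keyed on the caller's thread id. */
void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
{
	if (tid != spinlock->owner) {
		/* First acquisition by this thread: take the real lock. */
		_mali_osk_spinlock_irq_lock(spinlock->lock);
		spinlock->owner = tid;
	}

	++spinlock->counter; /* track nesting depth */
}

void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
{
	MALI_DEBUG_ASSERT(tid == spinlock->owner);

	if (0 == --spinlock->counter) {
		/* Last release: give up ownership before unlocking. */
		spinlock->owner = 0;
		_mali_osk_spinlock_irq_unlock(spinlock->lock);
	}
}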
struct mali_page_directory *mali_mmu_pagedir_alloc(void)
{
	struct mali_page_directory *pagedir;

	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
	if (NULL == pagedir) {
		return NULL;
	}

	if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped)) {
		_mali_osk_free(pagedir);
		return NULL;
	}

	/* Zero page directory */
	fill_page(pagedir->page_directory_mapped, 0);

	return pagedir;
}
ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries)
{
	ump_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping));

	init_entries = MALI_PAD_INT(init_entries);
	max_entries = MALI_PAD_INT(max_entries);

	if (NULL != map) {
		map->table = descriptor_table_alloc(init_entries);
		if (NULL != map->table) {
			map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
			if (NULL != map->lock) {
				_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
				map->max_nr_mappings_allowed = max_entries;
				map->current_nr_mappings = init_entries;
				return map;
			}
			descriptor_table_free(map->table);
		}
		_mali_osk_free(map);
	}
	return NULL;
}
struct mali_page_directory *mali_mmu_pagedir_alloc(void)
{
	struct mali_page_directory *pagedir;
	_mali_osk_errcode_t err;
	mali_dma_addr phys;

	pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
	if (NULL == pagedir) {
		return NULL;
	}

	err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
	if (_MALI_OSK_ERR_OK != err) {
		_mali_osk_free(pagedir);
		return NULL;
	}

	pagedir->page_directory = (u32)phys;

	/* Zero page directory */
	fill_page(pagedir->page_directory_mapped, 0);

	return pagedir;
}
_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
{
	memory_engine * engine = (memory_engine*)mem_engine;

	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(physical_allocators);
	/* ASSERT that the list member has been initialized, even if it won't be
	 * used for tracking. We need it to be initialized to see if we need to
	 * delete it from a list in the release function. */
	MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );

	if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
	{
		_mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
		if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
		{
			res = engine->process_address->allocate(descriptor);
		}
		if ( _MALI_OSK_ERR_OK == res )
		{
			/* address space setup OK, commit physical memory to the allocation */
			mali_physical_memory_allocator * active_allocator = physical_allocators;
			struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
			u32 offset = 0;

			while ( NULL != active_allocator )
			{
				switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
				{
					case MALI_MEM_ALLOC_FINISHED:
						if ( NULL != tracking_list )
						{
							/* Insert into the memory session list */
							/* ASSERT that it is not already part of a list */
							MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
							_mali_osk_list_add( &descriptor->list, tracking_list );
						}

						MALI_SUCCESS; /* all done */
					case MALI_MEM_ALLOC_NONE:
						/* reuse current active_allocation_tracker */
						MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
											  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
											  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
						active_allocator = active_allocator->next;
						break;
					case MALI_MEM_ALLOC_PARTIAL:
						if (NULL != active_allocator->next)
						{
							/* need a new allocation tracker */
							active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
							if (NULL != active_allocation_tracker->next)
							{
								active_allocation_tracker = active_allocation_tracker->next;
								MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
													  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
													  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
								active_allocator = active_allocator->next;
								break;
							}
						}
						/* FALL THROUGH */
					case MALI_MEM_ALLOC_INTERNAL_FAILURE:
						active_allocator = NULL; /* end the while loop */
						break;
				}
			}

			MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));

			/* allocation failure, start cleanup */
			/* loop over any potential partial allocations */
			active_allocation_tracker = &descriptor->physical_allocation;
			while (NULL != active_allocation_tracker)
			{
				/* handle blank trackers which will show up during failure */
				if (NULL != active_allocation_tracker->release)
				{
					active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
				}
				active_allocation_tracker = active_allocation_tracker->next;
			}

			/* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
			for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
			{
				void * buf = active_allocation_tracker;
				active_allocation_tracker = active_allocation_tracker->next;
				_mali_osk_free(buf);
			}

			/* release the address spaces */

			if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
			{
				engine->process_address->release(descriptor);
			}
		}
		engine->mali_address->release(descriptor);
	}

	MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
/**
 * Allocate a fence waiter tracker.
 *
 * @return New fence waiter if successful, NULL if not.
 */
static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
{
	return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
}
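The allocator above is intentionally trivial; the caller is expected to initialize the tracker and hand it to the Timeline system before use. A hedged sketch of that pairing, modeled on the mali_timeline_tracker_init() calls elsewhere on this page; the wrapper name, the MALI_TIMELINE_TRACKER_WAIT constant, and the `tracker` member (mirroring the sync fence tracker above) are assumptions.
/* Hedged sketch: allocate a fence waiter and register it as a tracker.
 * `fence` is a prepared struct mali_timeline_fence, as in the sync fence
 * example earlier on this page. */
static struct mali_timeline_fence_wait_tracker *wait_tracker_create(struct mali_timeline_fence *fence)
{
	struct mali_timeline_fence_wait_tracker *wait;

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (NULL == wait) {
		return NULL; /* calloc failed */
	}

	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
	return wait;
}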
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job)
	{
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s)))
		{
			goto fail;
		}

		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS)
		{
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		if (!mali_pp_job_use_no_notification(job))
		{
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
				(perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
		{
			mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
			mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;

		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0)
		{
			u32 size;

			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings)
			{
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies)
			{
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size))
			{
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs)
			{
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		}
		else
		{
			job->memory_cookies = NULL;
		}

		return job;
	}

fail:
	if (NULL != job)
	{
		mali_pp_job_delete(job);
	}

	return NULL;
}
Example #17
_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
{
	ump_session_data * session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element * session_memory_element = NULL;
	int map_id;

	DEBUG_ASSERT_POINTER( user_interaction );
	DEBUG_ASSERT_POINTER( user_interaction->ctx );

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element)
	{
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}


	new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
	if (NULL==new_allocation)
	{
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Create a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);

	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	/* Initialize the part of the new_allocation that we know so far */
	new_allocation->secure_id = (ump_secure_id)map_id;
	_mali_osk_atomic_init(&new_allocation->ref_count,1);
	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
		new_allocation->is_cached = 0;
	else
		new_allocation->is_cached = 1;

	/* Special case a size of 0: emulate what malloc does, which is to return a valid pointer that must be freed but can't be dereferenced. */
	if (0 == user_interaction->size)
	{
		user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
	}

	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
	{
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;
	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
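The secure id allocated above is what user space later hands back to the driver. A hedged sketch of the reverse lookup, modeled on ump_dd_handle_create_from_secure_id() from the same driver; the ump_descriptor_mapping_get() signature and the wrapper name are assumptions.
/* Hedged sketch: resolve a secure id back to its ump_dd_mem under the same
 * device.secure_id_map_lock used during allocation above.
 * ump_descriptor_mapping_get() is assumed to fill `mem` and return
 * _MALI_OSK_ERR_OK when the id is live. */
ump_dd_handle example_handle_from_secure_id(ump_secure_id secure_id)
{
	ump_dd_mem *mem;

	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	if (_MALI_OSK_ERR_OK != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void **)&mem)) {
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		return UMP_DD_HANDLE_INVALID; /* stale or never-allocated id */
	}

	_mali_osk_atomic_inc(&mem->ref_count); /* caller now owns a reference */
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	return (ump_dd_handle)mem;
}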
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* Is this assignment really needed? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dma_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -EFAULT;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dma_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}
/*
 * IOCTL operation; Import fd to  UMP memory
 */
int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;

	ump_session_memory_list_element * session_memory_element = NULL;
	if (ion_client_ump == NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data)
	{
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
	{
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID*/
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	sg_ion = ion_map_dma(ion_client_ump,ion_hnd);

	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ioctl_allocate()\n"));
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i>=1024) {
			_mali_osk_free(blocks);
			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while(sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));

	if (NULL == session_memory_element)
	{
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle)
	{
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);
	ion_unmap_dma(ion_client_ump,ion_hnd);
	ion_free(ion_client_ump, ion_hnd);

	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
	{
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */

		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));

		return -EFAULT;
	}
	return 0; /* success */
}
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
				       _mali_uk_pp_start_job_s __user *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job) {
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
			goto fail;
		}

		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		if (!mali_pp_job_use_no_notification(job)) {
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);

			/* These counters apply for all virtual jobs, and where no per sub job counter is specified */
			job->uargs.perf_counter_src0 = pp_counter_src0;
			job->uargs.perf_counter_src1 = pp_counter_src1;

			/* We only copy the per sub job array if it is enabled with at least one counter */
			if (0 < sub_job_count) {
				job->perf_counter_per_sub_job_count = sub_job_count;
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
			}
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;

		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0) {
			u32 size;
			u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;

			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		}

		/* Prepare DMA command buffer to start job, if it is virtual. */
		if (mali_pp_job_is_virtual_group_job(job)) {
			struct mali_pp_core *core;
			_mali_osk_errcode_t err =  mali_dma_get_cmd_buf(&job->dma_cmd_buf);

			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
				goto fail;
			}

			core = mali_pp_scheduler_get_virtual_pp();
			MALI_DEBUG_ASSERT_POINTER(core);

			mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
		}

		if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
			/* Not a valid job. */
			goto fail;
		}

		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

fail:
	if (NULL != job) {
		mali_pp_job_delete(job);
	}

	return NULL;
}
struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
{
	struct mali_pp_core *core = NULL;

	MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
	MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));

	if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
		MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
		return NULL;
	}

	core = _mali_osk_calloc(1, sizeof(struct mali_pp_core));
	if (NULL != core) {
		core->core_id = mali_global_num_pp_cores;
		core->bcast_id = bcast_id;

		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
			_mali_osk_errcode_t ret;

			if (!is_virtual) {
				ret = mali_pp_reset(core);
			} else {
				ret = _MALI_OSK_ERR_OK;
			}

			if (_MALI_OSK_ERR_OK == ret) {
				ret = mali_group_add_pp_core(group, core);
				if (_MALI_OSK_ERR_OK == ret) {
					/* Setup IRQ handlers (which will do IRQ probing if needed) */
					MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);

					core->irq = _mali_osk_irq_init(resource->irq,
								       mali_group_upper_half_pp,
								       group,
								       mali_pp_irq_probe_trigger,
								       mali_pp_irq_probe_ack,
								       core,
								       resource->description);
					if (NULL != core->irq) {
						mali_global_pp_cores[mali_global_num_pp_cores] = core;
						mali_global_num_pp_cores++;

						return core;
					} else {
						MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
					}
					mali_group_remove_pp_core(group);
				} else {
					MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
				}
			}
			mali_hw_core_delete(&core->hw_core);
		}

		_mali_osk_free(core);
	} else {
		MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
	}

	return NULL;
}
int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs)
{
	_mali_uk_profiling_control_set_s kargs;
	_mali_osk_errcode_t err;
	u8 *kernel_control_data = NULL;
	u8 *kernel_response_data = NULL;

	MALI_CHECK_NON_NULL(uargs, -EINVAL);

	if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT;
	if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT;

	kargs.ctx = (uintptr_t)session_data;

	if (0 != kargs.control_packet_size) {
		kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
		if (NULL == kernel_control_data) {
			return -ENOMEM;
		}

		MALI_DEBUG_ASSERT(0 != kargs.response_packet_size);

		kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
		if (NULL == kernel_response_data) {
			_mali_osk_free(kernel_control_data);
			return -ENOMEM;
		}

		kargs.control_packet_data = (uintptr_t)kernel_control_data;
		kargs.response_packet_data = (uintptr_t)kernel_response_data;

		if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) {
			_mali_osk_free(kernel_control_data);
			_mali_osk_free(kernel_response_data);
			return -EFAULT;
		}

		err = _mali_ukk_profiling_control_set(&kargs);
		if (_MALI_OSK_ERR_OK != err) {
			_mali_osk_free(kernel_control_data);
			_mali_osk_free(kernel_response_data);
			return map_errcode(err);
		}

		if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) {
			_mali_osk_free(kernel_control_data);
			_mali_osk_free(kernel_response_data);
			return -EFAULT;
		}

		if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) {
			_mali_osk_free(kernel_control_data);
			_mali_osk_free(kernel_response_data);
			return -EFAULT;
		}

		_mali_osk_free(kernel_control_data);
		_mali_osk_free(kernel_response_data);
	} else {
		err = _mali_ukk_profiling_control_set(&kargs);
		if (_MALI_OSK_ERR_OK != err) {
			return map_errcode(err);
		}

	}
	return 0;
}