Example no. 1
0
void mali_session_add(struct mali_session_data *session)
{
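	/* Hold the session lock while adding to the global session list and updating the session count. */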
	mali_session_lock();
	_mali_osk_list_add(&session->link, &mali_sessions);
	mali_session_count++;
	mali_session_unlock();
}
Example no. 2
0
struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
{
	u32 i;
	struct mali_soft_job_system *system;
	struct mali_soft_job *job;

	MALI_DEBUG_ASSERT_POINTER(session);

	system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
	if (NULL == system) {
		return NULL;
	}

	system->session = session;

	system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == system->lock) {
		mali_soft_job_system_destroy(system);
		return NULL;
	}
	system->lock_owner = 0;

	_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_free));
	_MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));

	for (i = 0; i < MALI_MAX_NUM_SOFT_JOBS; ++i) {
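		/* Each slot starts on the free list, marked FREE, with its array index as its id. */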
		job = &(system->jobs[i]);
		_mali_osk_list_add(&(job->system_list), &(system->jobs_free));
		job->system = system;
		job->state = MALI_SOFT_JOB_STATE_FREE;
		job->id = i;
	}

	return system;
}
Example no. 3
0
struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
{
	struct mali_soft_job *job;
	_mali_osk_notification_t *notification = NULL;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
			  (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));

	notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
	if (unlikely(NULL == notification)) {
		MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
		return NULL;
	}

	job = _mali_osk_malloc(sizeof(struct mali_soft_job));
	if (unlikely(NULL == job)) {
		MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed.\n"));
		/* Free the notification allocated above so it is not leaked on this error path. */
		_mali_osk_notification_delete(notification);
		return NULL;
	}

	mali_soft_job_system_lock(system);
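	/* Initialize the job and add it to the used list while holding the soft job system lock. */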

	job->system = system;
	job->id = system->last_job_id++;
	job->state = MALI_SOFT_JOB_STATE_ALLOCATED;

	_mali_osk_list_add(&(job->system_list), &(system->jobs_used));

	job->type = type;
	job->user_job = user_job;
	job->activated = MALI_FALSE;

	job->activated_notification = notification;

	_mali_osk_atomic_init(&job->refcount, 1);

	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
	MALI_DEBUG_ASSERT(system == job->system);
	MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);

	mali_soft_job_system_unlock(system);

	return job;
}
Example no. 4
/*
 * IOCTL operation; import an ion fd into UMP memory
 */
int ump_ion_import_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;

	ump_session_memory_list_element *session_memory_element = NULL;

	/* Lazily create the ion client shared by all UMP ion imports */
	if (NULL == ion_client_ump)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data)
	{
		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
	{
		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID */
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	if (IS_ERR_OR_NULL(ion_hnd)) {
		MSG_ERR(("ion_import_fd() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);
	if (IS_ERR_OR_NULL(sg_ion)) {
		ion_free(ion_client_ump, ion_hnd);
		MSG_ERR(("ion_map_dma() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(sizeof(ump_dd_physical_block) * 1024);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ion_import_wrapper()\n"));
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		return -ENOMEM;
	}

	sg = sg_ion;
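	/* Walk the ion scatterlist and record each segment's physical address and length (at most 1024 blocks). */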
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i>=1024) {
			_mali_osk_free(blocks);
			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while(sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));

	if (NULL == session_memory_element)
	{
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle)
	{
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		ion_unmap_dma(ion_client_ump, ion_hnd);
		ion_free(ion_client_ump, ion_hnd);
		DBG_MSG(1, ("ump_dd_handle_create_from_phys_blocks() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_mutex_wait(session_data->lock);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_mutex_signal(session_data->lock);
	ion_unmap_dma(ion_client_ump,ion_hnd);
	ion_free(ion_client_ump, ion_hnd);

	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
	{
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */

		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));

		return -EFAULT;
	}
	return 0; /* success */
}
Example no. 5
0
_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
{
	ump_session_data * session_data = NULL;
	ump_dd_mem *new_allocation = NULL;
	ump_session_memory_list_element * session_memory_element = NULL;
	int map_id;

	DEBUG_ASSERT_POINTER( user_interaction );
	DEBUG_ASSERT_POINTER( user_interaction->ctx );

	session_data = (ump_session_data *) user_interaction->ctx;

	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
	if (NULL == session_memory_element)
	{
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}


	new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
	if (NULL==new_allocation)
	{
		_mali_osk_free(session_memory_element);
		DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
		return _MALI_OSK_ERR_NOMEM;
	}

	/* Create a secure ID for this allocation */
	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);

	if (map_id < 0)
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(session_memory_element);
		_mali_osk_free(new_allocation);
		DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
		return _MALI_OSK_ERR_INVALID_FUNC;
	}

	/* Initialize the part of the new_allocation that we know so far */
	new_allocation->secure_id = (ump_secure_id)map_id;
	_mali_osk_atomic_init(&new_allocation->ref_count,1);
	if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
		new_allocation->is_cached = 0;
	else
		new_allocation->is_cached = 1;

	/* Special case a size of 0: emulate what malloc does, which is to return a valid pointer that must be freed but cannot be dereferenced */
	if (0 == user_interaction->size)
	{
		user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
	}

	new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
	new_allocation->lock_usage = UMP_NOT_LOCKED;

	/* Now, ask the active memory backend to do the actual memory allocation */
	if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
	{
		DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
		ump_descriptor_mapping_free(device.secure_id_map, map_id);
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		_mali_osk_free(new_allocation);
		_mali_osk_free(session_memory_element);
		return _MALI_OSK_ERR_INVALID_FUNC;
	}
	new_allocation->hw_device = _UMP_UK_USED_BY_CPU;
	new_allocation->ctx = device.backend->ctx;
	new_allocation->release_func = device.backend->release;

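	/* The allocation is fully initialized; release the secure ID map lock. */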
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element->mem = new_allocation;
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	user_interaction->secure_id = new_allocation->secure_id;
	user_interaction->size = new_allocation->size_bytes;
	DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));

	return _MALI_OSK_ERR_OK;
}
Example no. 6
_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation *descriptor, mali_physical_memory_allocator *physical_allocators, _mali_osk_list_t *tracking_list)
{
	memory_engine * engine = (memory_engine*)mem_engine;

	MALI_DEBUG_ASSERT_POINTER(engine);
	MALI_DEBUG_ASSERT_POINTER(descriptor);
	MALI_DEBUG_ASSERT_POINTER(physical_allocators);
	/* ASSERT that the list member has been initialized, even if it won't be
	 * used for tracking. We need it to be initialized to see if we need to
	 * delete it from a list in the release function. */
	MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );

	if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
	{
		_mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
		if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
		{
			res = engine->process_address->allocate(descriptor);
		}
		if ( _MALI_OSK_ERR_OK == res )
		{
			/* address space setup OK, commit physical memory to the allocation */
			mali_physical_memory_allocator * active_allocator = physical_allocators;
			struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
			u32 offset = 0;

			while ( NULL != active_allocator )
			{
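				/* Try the current allocator: FINISHED completes the request, NONE and PARTIAL fall back to the next allocator, INTERNAL_FAILURE aborts the loop. */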
				switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
				{
					case MALI_MEM_ALLOC_FINISHED:
						if ( NULL != tracking_list )
						{
							/* Insert into the memory session list */
							/* ASSERT that it is not already part of a list */
							MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
							_mali_osk_list_add( &descriptor->list, tracking_list );
						}

						MALI_SUCCESS; /* all done */
					case MALI_MEM_ALLOC_NONE:
						/* reuse current active_allocation_tracker */
						MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
											  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
											  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
						active_allocator = active_allocator->next;
						break;
					case MALI_MEM_ALLOC_PARTIAL:
						if (NULL != active_allocator->next)
						{
							/* need a new allocation tracker */
							active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
							if (NULL != active_allocation_tracker->next)
							{
								active_allocation_tracker = active_allocation_tracker->next;
								MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
													  ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
													  ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
								active_allocator = active_allocator->next;
								break;
							}
						}
					/* FALL THROUGH */
					case MALI_MEM_ALLOC_INTERNAL_FAILURE:
						active_allocator = NULL; /* end the while loop */
						break;
				}
			}

			MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));

			/* allocation failure, start cleanup */
			/* loop over any potential partial allocations */
			active_allocation_tracker = &descriptor->physical_allocation;
			while (NULL != active_allocation_tracker)
			{
				/* handle blank trackers which will show up during failure */
				if (NULL != active_allocation_tracker->release)
				{
					active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
				}
				active_allocation_tracker = active_allocation_tracker->next;
			}

			/* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
			for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
			{
				void * buf = active_allocation_tracker;
				active_allocation_tracker = active_allocation_tracker->next;
				_mali_osk_free(buf);
			}

			/* release the address spaces */

			if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
			{
				engine->process_address->release(descriptor);
			}
		}
		engine->mali_address->release(descriptor);
	}

	MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
Example no. 7
0
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dma_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -EFAULT;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dma_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}
Example no. 8
0
_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
{
	struct mali_group *group;
	struct mali_pp_core *pp_core;
	_mali_osk_lock_flags_t lock_flags;
	u32 num_groups;
	u32 i;

#if defined(MALI_UPPER_HALF_SCHEDULING)
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#else
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#endif

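	/* Set up the PP job queues and group lists before creating locks and work queues. */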
	_MALI_OSK_INIT_LIST_HEAD(&job_queue);
	_MALI_OSK_INIT_LIST_HEAD(&group_list_working);
	_MALI_OSK_INIT_LIST_HEAD(&group_list_idle);

	_MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue);

	pp_scheduler_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == pp_scheduler_lock)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
	if (NULL == pp_scheduler_working_wait_queue)
	{
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_wq_schedule = _mali_osk_wq_create_work(mali_pp_scheduler_do_schedule, NULL);
	if (NULL == pp_scheduler_wq_schedule)
	{
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
	if (NULL == pp_scheduler_wq_job_delete)
	{
		_mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_job_delete_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
	if (NULL == pp_scheduler_job_delete_lock)
	{
		_mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
		_mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}
#endif

	num_groups = mali_group_get_glob_num_groups();

	/* Do we have a virtual group? */
	for (i = 0; i < num_groups; i++)
	{
		group = mali_group_get_glob_group(i);

		if (mali_group_is_virtual(group))
		{
			MALI_DEBUG_PRINT(3, ("Found virtual group %p\n", group));

			virtual_group = group;
			break;
		}
	}

	/* Find all the available PP cores */
	for (i = 0; i < num_groups; i++)
	{
		group = mali_group_get_glob_group(i);
		pp_core = mali_group_get_pp_core(group);

		if (NULL != pp_core && !mali_group_is_virtual(group))
		{
			if (0 == pp_version)
			{
				/* Retrieve PP version from the first available PP core */
				pp_version = mali_pp_core_get_version(pp_core);
			}

			if (NULL != virtual_group)
			{
				/* Add all physical PP cores to the virtual group */
				mali_group_lock(virtual_group);
				group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
				mali_group_add_group(virtual_group, group);
				mali_group_unlock(virtual_group);
			}
			else
			{
				_mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
			}

			num_cores++;
		}
	}

	return _MALI_OSK_ERR_OK;
}