_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(uargs);
	MALI_DEBUG_ASSERT_POINTER(ctx);

	session = (struct mali_session_data*)ctx;

	job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
	mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
	return _MALI_OSK_ERR_OK;
#endif

	mali_pm_core_event(MALI_CORE_EVENT_GP_START);

	mali_gp_scheduler_lock();
	_mali_osk_list_addtail(&job->list, &job_queue);
	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	return _MALI_OSK_ERR_OK;
}
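Every example on this page feeds a fresh id from mali_scheduler_get_new_id() into the job constructors, and the L2 cache code further down relies on those ids increasing monotonically with submission order. The driver's actual id source is not shown here; as a rough sketch only, one way to provide such ids is a shared atomic counter (hypothetical names, plain C11 instead of the driver's _mali_osk_* wrappers):

#include <stdatomic.h>

/* Sketch only: a single, zero-initialized counter shared by GP and PP job
 * submission, so ids reflect overall submission order. This is NOT the
 * driver's real mali_scheduler_get_new_id(); the names are hypothetical. */
static atomic_uint sketch_job_id_counter;

static unsigned int sketch_get_new_job_id(void)
{
	/* fetch_add returns the previous value: first caller gets 0, then 1, ... */
	return atomic_fetch_add(&sketch_job_id_counter, 1);
}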
Example #2
mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs w/ their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started. */
		if (((s32)id) <= ((s32)cache->last_invalidated_id))
		{
			return MALI_FALSE;
		}
		else
		{
			cache->last_invalidated_id = mali_scheduler_get_new_id();
		}

		mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
	}
	return MALI_TRUE;
}
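The comment in the example above states the invariant this optimization relies on: job ids are handed out in submission order, so once a newer job has invalidated the cache, flushing again on behalf of an older job is redundant. An illustrative call sequence (the starting state is assumed, not taken from the driver) traces both branches:

/* Assume cache->last_invalidated_id starts at 0. */
mali_l2_cache_invalidate_conditional(cache, 5); /* 5 > 0: the clear-all command is sent,
                                                 * last_invalidated_id is bumped to the
                                                 * newest scheduler id, returns MALI_TRUE */
mali_l2_cache_invalidate_conditional(cache, 3); /* 3 <= last_invalidated_id: nothing is
                                                 * flushed, returns MALI_FALSE */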
Example #3
void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
	MALI_DEBUG_ASSERT_POINTER(cache);

	if (NULL != cache)
	{
		cache->last_invalidated_id = mali_scheduler_get_new_id();
		mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
	}
}
Example #4
void mali_l2_cache_invalidate_all(void)
{
	u32 i;
	for (i = 0; i < mali_global_num_l2_cache_cores; i++)
	{
		/* Additional check: only issue the command if the core's power state could be locked. */
		if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
		{
			_mali_osk_errcode_t ret;
			mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_id();
			ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
			if (_MALI_OSK_ERR_OK != ret)
			{
				MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
			}
		}
		mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
	}
}
_mali_osk_errcode_t _mali_ukk_pp_start_job(_mali_uk_pp_start_job_s *args)
{
	struct mali_session_data *session;
	struct mali_pp_job *job;

	MALI_DEBUG_ASSERT_POINTER(args);
	MALI_DEBUG_ASSERT_POINTER(args->ctx);

	session = (struct mali_session_data*)args->ctx;

	job = mali_pp_job_create(session, args, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

	if (_MALI_OSK_ERR_OK != mali_pp_job_check(job))
	{
		/* Not a valid job, return to user immediately */
		mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
		mali_pp_scheduler_return_job_to_user(job); /* This will also delete the job object */
		return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
	}

#if PROFILING_SKIP_PP_JOBS || PROFILING_SKIP_PP_AND_GP_JOBS
#warning PP jobs will not be executed
	mali_pp_scheduler_return_job_to_user(job);
	return _MALI_OSK_ERR_OK;
#endif

	mali_pp_scheduler_lock();

	_mali_osk_list_addtail(&job->list, &job_queue);

	MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) with %u parts queued\n", mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));

	mali_pp_scheduler_schedule();

	mali_pp_scheduler_unlock();

	return _MALI_OSK_ERR_OK;
}
Example #6
_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(args);

	if (NULL == args->ctx)
	{
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	session = (struct mali_session_data*)args->ctx;
	if (NULL == session)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
	mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
	return _MALI_OSK_ERR_OK;
#endif

	mali_gp_scheduler_lock();

	_mali_osk_list_addtail(&job->list, &job_queue);

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	mali_gp_scheduler_unlock();

	return _MALI_OSK_ERR_OK;
}
Example #7
_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(args);

	if (NULL == args->ctx)
	{
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	session = (struct mali_session_data*)args->ctx;
	if (NULL == session)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

	mali_gp_scheduler_lock();

	_mali_osk_list_addtail(&job->list, &job_queue);

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	mali_gp_scheduler_unlock();

	return _MALI_OSK_ERR_OK;
}
void mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id)
{
	MALI_DEBUG_ASSERT_POINTER(cluster);

	if (NULL != cluster->l2)
	{
		/* If the last cache invalidation was done by a job with a higher id we
		 * don't have to flush. Since user space will store jobs w/ their
		 * corresponding memory in sequence (first job #0, then job #1, ...),
		 * we don't have to flush for job n-1 if job n has already invalidated
		 * the cache since we know for sure that job n-1's memory was already
		 * written when job n was started. */
		if (cluster->last_invalidated_id > id)
		{
			return;
		}
		else
		{
			cluster->last_invalidated_id = mali_scheduler_get_new_id();
		}

		mali_l2_cache_invalidate_all(cluster->l2);
	}
}
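One plausible call site for the function above (hypothetical, not shown on this page) is the scheduler invalidating a cluster's L2 right before starting a job, passing the job's own id so that a burst of back-to-back submissions clears the cache only once:

/* Hypothetical usage: 'cluster' and 'job' are assumed to be in scope.
 * If a job with a higher id already invalidated this cluster's L2, the
 * call returns early without sending another clear-all command. */
mali_cluster_l2_cache_invalidate_all(cluster, mali_pp_job_get_id(job));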