void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
{
	_mali_uk_gp_job_suspended_s * jobres;
	_mali_osk_notification_t * notification;

	mali_gp_scheduler_lock();

	notification = job->oom_notification;
	job->oom_notification = NULL;
	slot.returned_cookie = mali_gp_job_get_id(job);

	jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
	jobres->user_job_ptr = mali_gp_job_get_user_id(job);
	jobres->cookie = mali_gp_job_get_id(job);

	mali_gp_scheduler_unlock();

	jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;

	mali_session_send_notification(mali_gp_job_get_session(job), notification);

	/*
	 * If this function failed, we could return the job to user space right away,
	 * but the job timeout timer will eventually do that.
	 * This is not a common case anyway.
	 */
}
Example #2
void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
{
	_mali_osk_notification_t *notobj;

	notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));

	if (NULL != notobj)
	{
		_mali_uk_gp_job_suspended_s * jobres;

		mali_gp_scheduler_lock();

		jobres = (_mali_uk_gp_job_suspended_s *)notobj->result_buffer;

		jobres->user_job_ptr = mali_gp_job_get_user_id(job);
		jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
		jobres->cookie = mali_gp_job_get_id(job);
		slot.returned_cookie = jobres->cookie;

		mali_session_send_notification(mali_gp_job_get_session(job), notobj);

		mali_gp_scheduler_unlock();
	}

	/*
	 * If this function failed, we could return the job to user space right away,
	 * but the job timeout timer will eventually do that.
	 * This is not a common case anyway.
	 */
}
Example #3
static void mali_gp_scheduler_schedule(void)
{
	struct mali_gp_job *job;

	MALI_ASSERT_GP_SCHEDULER_LOCKED();

	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
	{
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
		                     pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
		return; /* Nothing to do, so early out */
	}

	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
	if (_MALI_OSK_ERR_OK == mali_group_start_gp_job(slot.group, job))
	{
		/* Mark slot as busy */
		slot.state = MALI_GP_SLOT_STATE_WORKING;

		/* Remove from queue of unscheduled jobs */
		_mali_osk_list_del(&job->list);
	}
	else
	{
		MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
	}
}
Example #4
void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
{
	_mali_osk_notification_t *notobj;

	notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));

	if (NULL != notobj)
	{
		_mali_uk_gp_job_suspended_s * jobres;

		mali_gp_scheduler_lock();

		jobres = (_mali_uk_gp_job_suspended_s *)notobj->result_buffer;

		jobres->user_job_ptr = mali_gp_job_get_user_id(job);
		jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
		jobres->cookie = mali_gp_job_get_id(job);
		slot.returned_cookie = jobres->cookie;

		mali_session_send_notification(mali_gp_job_get_session(job), notobj);

		mali_gp_scheduler_unlock();
	}

}
_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(uargs);
	MALI_DEBUG_ASSERT_POINTER(ctx);

	session = (struct mali_session_data*)ctx;

	job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
	mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
	return _MALI_OSK_ERR_OK;
#endif

	mali_pm_core_event(MALI_CORE_EVENT_GP_START);

	mali_gp_scheduler_lock();
	_mali_osk_list_addtail(&job->list, &job_queue);
	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	return _MALI_OSK_ERR_OK;
}
static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
{
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
	MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);

	if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
	{
		mali_gp_scheduler_unlock();
		MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
		                     pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
		return; /* Nothing to do, so early out */
	}

	/* Get (and remove) next job in queue */
	job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
	_mali_osk_list_del(&job->list);

	/* Mark slot as busy */
	slot.state = MALI_GP_SLOT_STATE_WORKING;

	mali_gp_scheduler_unlock();

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));

	if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job))
	{
		MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
		MALI_DEBUG_ASSERT(0); /* This can't fail on Mali-300 and newer, so there is no need to put the job back on the queue */
	}
}
void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
{
	struct mali_gp_job *iter;
	struct mali_gp_job *tmp;

	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();

	/* Find position in list/queue where job should be added. */
	_MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
					    struct mali_gp_job, list) {

		/* A span is used to handle job ID wrapping. */
		bool job_is_after = (mali_gp_job_get_id(job) -
				     mali_gp_job_get_id(iter)) <
				    MALI_SCHEDULER_JOB_ID_SPAN;

		if (job_is_after) {
			break;
		}
	}

	/* Insert the job after the last queued job that should run before it. */
	_mali_osk_list_add(&job->list, &iter->list);
}
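The ordering test above relies on unsigned wrap-around arithmetic: subtracting two u32 job IDs and comparing the difference against MALI_SCHEDULER_JOB_ID_SPAN keeps the queue ordered even after the ID counter wraps past 0xFFFFFFFF. A minimal, self-contained sketch of the idea (the helper name and the span value below are illustrative assumptions, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical span: half of the 32-bit ID space, mirroring the idea behind
 * MALI_SCHEDULER_JOB_ID_SPAN (the exact driver value may differ). */
#define EXAMPLE_JOB_ID_SPAN 0x80000000u

/* Returns true if job ID 'a' was issued after job ID 'b', even across
 * a wrap of the 32-bit counter. */
static bool example_job_is_after(uint32_t a, uint32_t b)
{
	return (a - b) < EXAMPLE_JOB_ID_SPAN;
}

/* Example: ID 0x00000002 was issued after ID 0xFFFFFFFF, because
 * 0x00000002 - 0xFFFFFFFF == 0x00000003 (mod 2^32), which is below the span. */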
void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
{
	mali_group_lock(group);

	if (group->gp_state == MALI_GROUP_CORE_STATE_IDLE ||
	    mali_gp_job_get_id(group->gp_running_job) != job_id)
	{
		mali_group_unlock(group);
		return; /* No need to cancel or job has already been aborted or completed */
	}

	mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* Will release group lock */
}
void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
{
	struct mali_gp_job *gp_job;
	struct mali_pp_job *pp_job;
	u32 gp_job_id = 0;
	u32 pp_job_id = 0;
	mali_bool abort_pp = MALI_FALSE;
	mali_bool abort_gp = MALI_FALSE;

	mali_group_lock(group);

	gp_job = group->gp_running_job;
	pp_job = group->pp_running_job;

	if (gp_job && mali_gp_job_get_session(gp_job) == session)
	{
		MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));

		gp_job_id = mali_gp_job_get_id(gp_job);
		abort_gp = MALI_TRUE;
	}

	if (pp_job && mali_pp_job_get_session(pp_job) == session)
	{
		MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));

		pp_job_id = mali_pp_job_get_id(pp_job);
		abort_pp = MALI_TRUE;
	}

	mali_group_unlock(group);

	/* These functions take and release the group lock */
	if (0 != abort_gp)
	{
		mali_group_abort_gp_job(group, gp_job_id);
	}
	if (0 != abort_pp)
	{
		mali_group_abort_pp_job(group, pp_job_id);
	}

	mali_group_lock(group);
	mali_group_remove_session_if_unused(group, session);
	mali_group_unlock(group);
}
void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
	mali_group_lock(group);

	if (group->gp_state != MALI_GROUP_CORE_STATE_OOM ||
	    mali_gp_job_get_id(group->gp_running_job) != job_id)
	{
		mali_group_unlock(group);
		return; /* Illegal request or job has already been aborted */
	}

	mali_cluster_l2_cache_invalidate_all_force(group->cluster);
	mali_mmu_zap_tlb_without_stall(group->mmu);

	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
	group->gp_state = MALI_GROUP_CORE_STATE_WORKING;

	mali_group_unlock(group);
}
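Together with mali_gp_scheduler_oom() above, this is the other half of the out-of-memory round trip: the driver reports the stalled job to user space with a cookie (the job ID), user space grows the PLBU heap, and the driver then resumes the job with the new heap bounds. A hedged sketch of that driver-side glue, assuming a response that carries the cookie and the new heap range (the wrapper name and its parameters are illustrative, not the driver's actual user/kernel entry point):

/* Illustrative only: the real driver receives this response through its
 * user/kernel interface. Only mali_group_resume_gp_with_new_heap() and the
 * cookie/job-ID relationship are taken from the code above. */
static void example_gp_suspend_response(struct mali_group *group,
					u32 cookie,      /* jobres->cookie from the OOM notification */
					u32 heap_start,  /* start of the newly allocated heap chunk */
					u32 heap_end)    /* end of the newly allocated heap chunk */
{
	/* The cookie is the job ID, so it can be passed straight through;
	 * mali_group_resume_gp_with_new_heap() ignores the request if the job
	 * has already been aborted or is no longer in the OOM state. */
	mali_group_resume_gp_with_new_heap(group, cookie, heap_start, heap_end);
}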
Example #11
_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(args);

	if (NULL == args->ctx)
	{
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	session = (struct mali_session_data*)args->ctx;
	if (NULL == session)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
	mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
	return _MALI_OSK_ERR_OK;
#endif

	mali_gp_scheduler_lock();

	_mali_osk_list_addtail(&job->list, &job_queue);

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	mali_gp_scheduler_unlock();

	return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
{
	struct mali_session_data *session;
	enum mali_group_activate_pd_status activate_status;

	MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);

	mali_pm_core_event(MALI_CORE_EVENT_GP_START);

	session = mali_gp_job_get_session(job);

	mali_group_lock(group);

	mali_cluster_l2_cache_invalidate_all(group->cluster, mali_gp_job_get_id(job));

	activate_status = mali_group_activate_page_directory(group, session);
	if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
	{
		/* If the session is not kept, zapping is done as part of the session switch */
		if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
		{
			mali_mmu_zap_tlb_without_stall(group->mmu);
		}
		mali_gp_job_start(group->gp_core, job);
		group->gp_running_job = job;
		group->gp_state = MALI_GROUP_CORE_STATE_WORKING;

		mali_group_unlock(group);

		return _MALI_OSK_ERR_OK;
	}

#if defined(USING_MALI200)
	group->pagedir_activation_failed = MALI_TRUE;
#endif

	mali_group_unlock(group);

	mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_GP_START */
	return _MALI_OSK_ERR_FAULT;
}
void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
{
	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));

	mali_gp_scheduler_return_job_to_user(job, success);

	mali_gp_scheduler_lock();

	/* Mark slot as idle again */
	slot.state = MALI_GP_SLOT_STATE_IDLE;

	/* If paused, then this was the last job, so wake up sleeping workers */
	if (pause_count > 0)
	{
		_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
	}

	mali_gp_scheduler_schedule_on_group(group);

	/* It is ok to do this after the schedule call, since START/STOP is simply ++ and -- anyway */
	mali_pm_core_event(MALI_CORE_EVENT_GP_STOP);
}
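The START/STOP pairing mentioned in the comment is essentially a reference count on active GP work: mali_group_start_gp_job() issues MALI_CORE_EVENT_GP_START (and issues a STOP to cancel it if the start fails), and the job-done path issues the matching STOP. A minimal sketch of that idea, assuming a simple counter (this is not the driver's mali_pm_core_event() implementation):

/* Hypothetical illustration of the ++/-- behaviour the comment refers to. */
static u32 example_gp_active_jobs; /* protected by an external lock in practice */

static void example_pm_core_event(mali_bool is_start)
{
	if (MALI_TRUE == is_start)
	{
		example_gp_active_jobs++; /* MALI_CORE_EVENT_GP_START */
	}
	else
	{
		example_gp_active_jobs--; /* MALI_CORE_EVENT_GP_STOP */
	}

	/* When the count reaches zero the PM code may power the GP core down.
	 * Because the events form a balanced ++/--, their ordering relative to
	 * scheduling the next job does not matter. */
}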
Example #14
void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
{
	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));

	mali_gp_scheduler_lock();

	/* Mark slot as idle again */
	slot.state = MALI_GP_SLOT_STATE_IDLE;

	/* If paused, then this was the last job, so wake up sleeping workers */
	if (pause_count > 0)
	{
		_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
	}
	else
	{
		mali_gp_scheduler_schedule();
	}

	mali_gp_scheduler_unlock();

	mali_gp_scheduler_return_job_to_user(job, success);
}
Example #15
void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
{
	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));

	mali_gp_scheduler_lock();

	/* Mark slot as idle again */
	slot.state = MALI_GP_SLOT_STATE_IDLE;

	/* If paused, then this was the last job, so wake up sleeping workers */
	if (pause_count > 0)
	{
		_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
	}
	else
	{
		mali_gp_scheduler_schedule();
	}

	mali_gp_scheduler_unlock();

	mali_gp_scheduler_return_job_to_user(job, success);
}
Example #16
_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
{
	struct mali_session_data *session;
	struct mali_gp_job *job;

	MALI_DEBUG_ASSERT_POINTER(args);

	if (NULL == args->ctx)
	{
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	session = (struct mali_session_data*)args->ctx;
	if (NULL == session)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
	if (NULL == job)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

	mali_gp_scheduler_lock();

	_mali_osk_list_addtail(&job->list, &job_queue);

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));

	mali_gp_scheduler_schedule();

	mali_gp_scheduler_unlock();

	return _MALI_OSK_ERR_OK;
}
Example #17
static void mali_gp_bottom_half(void *data)
{
    struct mali_gp_core *core = (struct mali_gp_core *)data;
    u32 irq_readout;
    u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif

    mali_group_lock(core->group);

    if ( MALI_FALSE == mali_group_power_is_on(core->group) )
    {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
        mali_group_unlock(core->group);
        return;
    }

    irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
    MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

    if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
    {
        u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
        if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
        {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED);
            return;
        }
    }

    irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
    if (0 != irq_errors)
    {
        mali_gp_post_process_job(core, MALI_FALSE);
        MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED);
        return;
    }
    else if (MALI_TRUE == core->core_timed_out)
    {
        if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
        {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
        }
        else
        {
            MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out but current job is %d\n", core->timeout_job_id, mali_gp_job_get_id(core->running_job)));
            mali_group_unlock(core->group);
        }
        core->core_timed_out = MALI_FALSE;
        return;
    }
    else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
    {
        mali_gp_post_process_job(core, MALI_TRUE);
        MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM);
        return;
    }
    else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
    {
        /* Just clear the HANG interrupt; the job timeout timer will catch jobs that are really hung */
        mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
    }

    mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
    mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif
}
Example #18
void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
{
	u32 startcmd = 0;
	u32 *frame_registers = mali_gp_job_get_frame_registers(job);

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_ASSERT_GROUP_LOCKED(core->group);

	core->counter_src0_used = core->counter_src0;
	core->counter_src1_used = core->counter_src1;

	if (mali_gp_job_has_vs_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
	}

	if (mali_gp_job_has_plbu_job(job))
	{
		startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
	}

	MALI_DEBUG_ASSERT(0 != startcmd);

	mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);

#if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH
	{
		/* Read hits and Read misses*/
		mali_l2_cache_core_set_counter_src0(mali_l2_cache_core_get_glob_l2_core(0), 20);
		mali_l2_cache_core_set_counter_src1(mali_l2_cache_core_get_glob_l2_core(0), 21);
	}
#endif

	/* This selects which performance counters we are reading */
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		/* global_config has enabled HW counters; this overrides anything specified by user space */
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
		}
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
			mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
		}
	}
	else
	{
		/* Use HW counters from job object, if any */
		u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
		if (0 != perf_counter_flag)
		{
			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
			{
				core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
			}

			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
			{
				core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
				mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
			}
		}
	}

	MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);

	/* Barrier to make sure the previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* Set up the timeout timer and save the ID of the job running on the GP core */

	_mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
	core->timeout_job_id = mali_gp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
	                              mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
#endif

	core->running_job = job;
}
Example #19
static void mali_gp_bottom_half(void *data)
{
	struct mali_gp_core *core = (struct mali_gp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
#endif

	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
	MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
	{
		u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
			return;
		}
	}

	/*
	 * Now let's look at the possible error cases (IRQ indicating error or timeout).
	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
	 */
	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
	if (0 != irq_errors)
	{
		mali_gp_post_process_job(core, MALI_FALSE);
		MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
		}
		core->core_timed_out = MALI_FALSE;
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
	{
		/* GP wants more memory in order to continue.
		 *
		 * This must be handled prior to HANG because this actually can
		 * generate a HANG while waiting for more memory.
	 * It must also be handled before the completion interrupts, since the
	 * PLBU can run out of memory after the VS has completed; in that case
	 * the OOM must be handled first so that the PLBU work can complete.
		 */
		mali_gp_post_process_job(core, MALI_TRUE);
		MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
	{
		/* we mask hang interrupts, so this should never happen... */
		MALI_DEBUG_ASSERT( 0 );
	}

	/* The only way to get here is if we only got one of two needed END_CMD_LST
	 * interrupts. Disable the interrupt that has been received and continue to
	 * run. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
			MALIGP2_REG_VAL_IRQ_MASK_USED &
			((irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)
			? ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST
			: ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST
			));
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
}
Example #20
void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
{
    u32 startcmd = 0;
    u32 *frame_registers = mali_gp_job_get_frame_registers(job);

    MALI_DEBUG_ASSERT_POINTER(core);
    MALI_ASSERT_GROUP_LOCKED(core->group);

    core->counter_src0_used = core->counter_src0;
    core->counter_src1_used = core->counter_src1;

    if (mali_gp_job_has_vs_job(job))
    {
        startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
    }

    if (mali_gp_job_has_plbu_job(job))
    {
        startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
    }

    MALI_DEBUG_ASSERT(0 != startcmd);

    mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);

#if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH
    {

        mali_l2_cache_core_set_counter_src0(mali_l2_cache_core_get_glob_l2_core(0), 20);
        mali_l2_cache_core_set_counter_src1(mali_l2_cache_core_get_glob_l2_core(0), 21);
    }
#endif


    /* This selects which performance counters we are reading */
    if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
    {

        if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
        {
            mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
            mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
        }
        if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
        {
            mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
            mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
        }
    }
    else
    {

        u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
        if (0 != perf_counter_flag)
        {
            if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
            {
                core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
                mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
                mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
            }

            if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
            {
                core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
                mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
                mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
            }
        }
    }

    MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));

    /* Barrier to make sure the previous register write is finished */
    _mali_osk_write_mem_barrier();

    /* This is the command that starts the core. */
    mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);

    /* Barrier to make sure the previous register write is finished */
    _mali_osk_write_mem_barrier();

    /* Set up the timeout timer and save the ID of the job running on the GP core */
    _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
    core->timeout_job_id = mali_gp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
                                  job->frame_builder_id, job->flush_id, 0, 0, 0);
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), job->pid, job->tid, 0, 0, 0);
#endif

    core->running_job = job;
}