_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
    _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
    MALI_IGNORE(event); /* event is not used for release code, and that is OK */

#if defined(CONFIG_MALI400_PROFILING)
    /*
     * Manually generate user space events in kernel space.
     * This saves user space from calling kernel space twice in this case.
     * We just need to remember to add pid and tid manually.
     */
    if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
                                      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                                      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
                                      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
    }

    if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
                                      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                                      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
                                      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
    }
#endif

    MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
    MALI_SUCCESS;
}
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
    _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(),
                                  args->data[2], args->data[3], args->data[4]);

    return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
    /* Always add the process and thread identifiers in the first two data elements for events from user space */
    _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(),
                                  args->data[2], args->data[3], args->data[4]);

    return _MALI_OSK_ERR_OK;
}
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
{
    struct mali_gp_job *job;

    job = _mali_osk_malloc(sizeof(struct mali_gp_job));
    if (NULL != job) {
        _mali_osk_list_init(&job->list);
        job->session = session;
        job->id = id;
        job->user_id = args->user_job_ptr;
        _mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
        job->heap_current_addr = args->frame_registers[4];
        job->perf_counter_flag = args->perf_counter_flag;
        job->perf_counter_src0 = args->perf_counter_src0;
        job->perf_counter_src1 = args->perf_counter_src1;
        job->perf_counter_value0 = 0;
        job->perf_counter_value1 = 0;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();
        job->frame_builder_id = args->frame_builder_id;
        job->flush_id = args->flush_id;

        return job;
    }

    return NULL;
}
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id)
{
    struct mali_gp_job *job;
    u32 perf_counter_flag;

    job = _mali_osk_malloc(sizeof(struct mali_gp_job));
    if (NULL != job) {
        job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
        if (NULL == job->finished_notification) {
            _mali_osk_free(job);
            return NULL;
        }

        job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
        if (NULL == job->oom_notification) {
            _mali_osk_notification_delete(job->finished_notification);
            _mali_osk_free(job);
            return NULL;
        }

        if (0 != copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
            _mali_osk_notification_delete(job->finished_notification);
            _mali_osk_notification_delete(job->oom_notification);
            _mali_osk_free(job);
            return NULL;
        }

        perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

        /* case when no counters came from user space
         * so pass the debugfs / DS-5 provided global ones to the job object */
        if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
              (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
            mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
            mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
        }

        _mali_osk_list_init(&job->list);
        job->session = session;
        job->id = id;
        job->heap_current_addr = job->uargs.frame_registers[4];
        job->perf_counter_value0 = 0;
        job->perf_counter_value1 = 0;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();

        return job;
    }

    return NULL;
}
int compositor_priority_wrapper(struct mali_session_data *session_data)
{
#ifndef CONFIG_SYNC
    /* Compositor super priority is currently only needed and supported in
     * systems without linux fences */
    _mali_ukk_compositor_priority(session_data);
#else
    MALI_DEBUG_PRINT(2, ("Compositor Pid: %d - Using native fence\n", _mali_osk_get_pid()));
#endif

    return 0;
}
int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
{
    _mali_uk_get_api_version_s kargs;
    _mali_osk_errcode_t err;
    u32 mem = _mali_ukk_report_memory_usage();

    printk("Mali: mem_usage before %d : %u\n", _mali_osk_get_pid(), mem);

    MALI_CHECK_NON_NULL(uargs, -EINVAL);

    if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;

    kargs.ctx = session_data;
    err = _mali_ukk_get_api_version(&kargs);
    if (_MALI_OSK_ERR_OK != err) return map_errcode(err);

    if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
    if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;

    return 0;
}
int stream_create_wrapper(struct mali_session_data *session_data, _mali_uk_stream_create_s __user *uargs)
{
    _mali_uk_stream_create_s kargs;
    _mali_osk_errcode_t err;
    char name[32];

    MALI_CHECK_NON_NULL(uargs, -EINVAL);

    snprintf(name, 32, "mali-%u", _mali_osk_get_pid());

    kargs.ctx = session_data;
    err = mali_stream_create(name, &kargs.fd);
    if (_MALI_OSK_ERR_OK != err) {
        return map_errcode(err);
    }

    kargs.ctx = NULL; /* prevent kernel address from being returned to user space */
    if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_stream_create_s))) return -EFAULT;

    return 0;
}
void _mali_osk_profiling_report_sw_counters(u32 *counters)
{
    trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
}
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
    struct mali_pp_job *job;
    u32 perf_counter_flag;

    job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
    if (NULL != job) {
        if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
            goto fail;
        }

        if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
            MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
            goto fail;
        }

        if (!mali_pp_job_use_no_notification(job)) {
            job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
            if (NULL == job->finished_notification) goto fail;
        }

        perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

        /* case when no counters came from user space
         * so pass the debugfs / DS-5 provided global ones to the job object */
        if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
              (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
            mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
            mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
        }

        _mali_osk_list_init(&job->list);
        job->session = session;
        _mali_osk_list_init(&job->session_list);
        job->id = id;
        job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();
        job->num_memory_cookies = job->uargs.num_memory_cookies;

        if (job->num_memory_cookies > 0) {
            u32 size;

            if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
                MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
                goto fail;
            }

            size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;

            job->memory_cookies = _mali_osk_malloc(size);
            if (NULL == job->memory_cookies) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
                goto fail;
            }

            if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
                goto fail;
            }

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
            job->num_dma_bufs = job->num_memory_cookies;
            job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
            if (NULL == job->dma_bufs) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
                goto fail;
            }
#endif
        } else {
            job->memory_cookies = NULL;
        }

        return job;
    }

fail:
    if (NULL != job) {
        mali_pp_job_delete(job);
    }

    return NULL;
}
_mali_osk_errcode_t _mali_ukk_open(void **context)
{
    int i;
    _mali_osk_errcode_t err;
#ifdef MALI_SESSION_MEMORY_USAGE
    struct mali_session_data_list *session_data_list;
#endif
    struct mali_session_data *session_data;

    /* allocate struct to track this session */
    session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
    MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);

#ifdef MALI_SESSION_MEMORY_USAGE
    session_data_list = (struct mali_session_data_list *)_mali_osk_malloc(sizeof(struct mali_session_data_list));
    if (session_data_list == NULL) {
        _mali_osk_free(session_data);
        return _MALI_OSK_ERR_NOMEM;
    }

    _MALI_OSK_INIT_LIST_HEAD(&session_data_list->list_head);
    session_data_list->pid = _mali_osk_get_pid();
    session_data_list->session_data = session_data;
    session_data->list = session_data_list;
#endif

    _mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));

    /* create a response queue for this session */
    session_data->ioctl_queue = _mali_osk_notification_queue_init();
    if (NULL == session_data->ioctl_queue) {
        _mali_osk_free(session_data);
#ifdef MALI_SESSION_MEMORY_USAGE
        _mali_osk_free(session_data_list);
#endif
        MALI_ERROR(_MALI_OSK_ERR_NOMEM);
    }

    MALI_DEBUG_PRINT(3, ("Session starting\n"));

    /* call session_begin on all subsystems */
    for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i) {
        if (NULL != subsystems[i]->session_begin) {
            /* subsystem has a session_begin */
            err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
            MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
        }
    }

    *context = (void *)session_data;

#ifdef MALI_SESSION_MEMORY_USAGE
    _mali_osk_lock_wait(session_data_lock, _MALI_OSK_LOCKMODE_RW);
    _mali_osk_list_addtail(&session_data_list->list_head, &session_data_head);
    _mali_osk_lock_signal(session_data_lock, _MALI_OSK_LOCKMODE_RW);
#endif

    MALI_DEBUG_PRINT(3, ("Session started\n"));
    MALI_SUCCESS;

cleanup:
    MALI_DEBUG_PRINT(2, ("Session startup failed\n"));

    /* i is the index of the subsystem that failed session begin; all indices before it have to be ended */
    /* end subsystem sessions in the reverse order they were started in */
    for (i = i - 1; i >= 0; --i) {
        if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
    }

    _mali_osk_notification_queue_term(session_data->ioctl_queue);
    _mali_osk_free(session_data);
#ifdef MALI_SESSION_MEMORY_USAGE
    _mali_osk_free(session_data_list);
#endif

    /* return what the subsystem which failed session start returned */
    MALI_ERROR(err);
}
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
    struct mali_pp_job *job;
    u32 perf_counter_flag;

    job = _mali_osk_malloc(sizeof(struct mali_pp_job));
    if (NULL != job) {
        u32 i;

        if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
            _mali_osk_free(job);
            return NULL;
        }

        if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
            MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
            _mali_osk_free(job);
            return NULL;
        }

        if (!mali_pp_job_use_no_notification(job)) {
            job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
            if (NULL == job->finished_notification) {
                _mali_osk_free(job);
                return NULL;
            }
        } else {
            job->finished_notification = NULL;
        }

        perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

        /* case when no counters came from user space
         * so pass the debugfs / DS-5 provided global ones to the job object */
        if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
              (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
            mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
            mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
        }

        _mali_osk_list_init(&job->list);
        job->session = session;
        _mali_osk_list_init(&job->session_list);
        job->id = id;

        for (i = 0; i < job->uargs.num_cores; i++) {
            job->perf_counter_value0[i] = 0;
            job->perf_counter_value1[i] = 0;
        }

        job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
        job->sub_jobs_started = 0;
        job->sub_jobs_completed = 0;
        job->sub_job_errors = 0;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();

#if defined(MTK_CONFIG_SYNC)
        job->sync_point = NULL;
        job->pre_fence = NULL;
        job->sync_work = NULL;
#endif

        return job;
    }

    return NULL;
}
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s __user *uargs, u32 id)
{
    struct mali_pp_job *job;
    u32 perf_counter_flag;

    job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
    if (NULL != job) {
        if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
            goto fail;
        }

        if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
            MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
            goto fail;
        }

        if (!mali_pp_job_use_no_notification(job)) {
            job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
            if (NULL == job->finished_notification) goto fail;
        }

        perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

        /* case when no counters came from user space
         * so pass the debugfs / DS-5 provided global ones to the job object */
        if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
              (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
            u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);

            /* These counters apply for all virtual jobs, and where no per sub job counter is specified */
            job->uargs.perf_counter_src0 = pp_counter_src0;
            job->uargs.perf_counter_src1 = pp_counter_src1;

            /* We only copy the per sub job array if it is enabled with at least one counter */
            if (0 < sub_job_count) {
                job->perf_counter_per_sub_job_count = sub_job_count;
                _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
                _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
            }
        }

        _mali_osk_list_init(&job->list);
        job->session = session;
        _mali_osk_list_init(&job->session_list);
        job->id = id;
        job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();
        job->num_memory_cookies = job->uargs.num_memory_cookies;

        if (job->num_memory_cookies > 0) {
            u32 size;
            u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;

            if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
                MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
                goto fail;
            }

            size = sizeof(*memory_cookies) * job->num_memory_cookies;

            job->memory_cookies = _mali_osk_malloc(size);
            if (NULL == job->memory_cookies) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
                goto fail;
            }

            if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
                goto fail;
            }

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
            job->num_dma_bufs = job->num_memory_cookies;
            job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
            if (NULL == job->dma_bufs) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
                goto fail;
            }
#endif
        }

        /* Prepare DMA command buffer to start job, if it is virtual. */
        if (mali_pp_job_is_virtual_group_job(job)) {
            struct mali_pp_core *core;
            _mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);

            if (_MALI_OSK_ERR_OK != err) {
                MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
                goto fail;
            }

            core = mali_pp_scheduler_get_virtual_pp();
            MALI_DEBUG_ASSERT_POINTER(core);

            mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
        }

        if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
            /* Not a valid job. */
            goto fail;
        }

        mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
        mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

        return job;
    }

fail:
    if (NULL != job) {
        mali_pp_job_delete(job);
    }

    return NULL;
}
static void mali_gp_bottom_half(void *data)
{
    struct mali_gp_core *core = (struct mali_gp_core *)data;
    u32 irq_readout;
    u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid() + 11000, 0, 0, 0);
#endif
#endif

    mali_group_lock(core->group);

    if (MALI_FALSE == mali_group_power_is_on(core->group)) {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
        mali_group_unlock(core->group);
        return;
    }

    irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
    MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

    if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
        u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
        if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED);
            return;
        }
    }

    irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
    if (0 != irq_errors) {
        mali_gp_post_process_job(core, MALI_FALSE);
        MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED);
        return;
    } else if (MALI_TRUE == core->core_timed_out) {
        if (core->timeout_job_id == mali_gp_job_get_id(core->running_job)) {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
        } else {
            MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out but current job is %d\n", core->timeout_job_id, mali_gp_job_get_id(core->running_job)));
            mali_group_unlock(core->group);
        }
        core->core_timed_out = MALI_FALSE;
        return;
    } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
        mali_gp_post_process_job(core, MALI_TRUE);
        MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM);
        return;
    } else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG) {
        mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
    }

    mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
    mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid() + 11000, 0, 0, 0);
#endif
#endif
}
static void mali_gp_bottom_half(void *data)
{
    struct mali_gp_core *core = (struct mali_gp_core *)data;
    u32 irq_readout;
    u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid() + 11000, 0, 0, 0);
#endif
#endif

    mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

    if (MALI_FALSE == mali_group_power_is_on(core->group)) {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
        mali_group_unlock(core->group);
        return;
    }

    irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
    MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

    if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
        u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
        if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
            return;
        }
    }

    /*
     * Now let's look at the possible error cases (an IRQ indicating an error or a timeout).
     * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
     */
    irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
    if (0 != irq_errors) {
        mali_gp_post_process_job(core, MALI_FALSE);
        MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
        return;
    } else if (MALI_TRUE == core->core_timed_out) { /* SW timeout */
        if (core->timeout_job_id == mali_gp_job_get_id(core->running_job)) {
            mali_gp_post_process_job(core, MALI_FALSE);
            MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
            mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
        }
        core->core_timed_out = MALI_FALSE;
        return;
    } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
        /* GP wants more memory in order to continue.
         *
         * This must be handled prior to HANG because waiting for more memory
         * can actually generate a HANG.
         * It must also be handled before the completion interrupts,
         * since the PLBU can run out of memory after VS is complete;
         * in that case the OOM must be handled before the PLBU work can complete.
         */
        mali_gp_post_process_job(core, MALI_TRUE);
        MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
        mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
        return;
    } else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG) {
        /* Just ignore hang interrupts, the job timer will detect hanging jobs anyway */
        mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
    }

    /*
     * The only way to get here is if we got a HANG interrupt, which we ignore, or only one of the two needed END_CMD_LST interrupts.
     * Re-enable interrupts and let the core continue to run.
     */
    mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
    mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid() + 11000, 0, 0, 0);
#endif
#endif
}
static void mali_pp_bottom_half(void *data)
{
    struct mali_pp_core *core = (struct mali_pp_core *)data;
    u32 irq_readout;
    u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

    mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

    if (MALI_FALSE == mali_group_power_is_on(core->group)) {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
        mali_group_unlock(core->group);
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
        return;
    }

    irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
    MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

    if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME) {
        mali_pp_post_process_job(core);
        MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
        mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
        return;
    }

    /*
     * Now let's look at the possible error cases (an IRQ indicating an error or a timeout).
     * END_OF_FRAME and HANG interrupts are not considered errors.
     */
    irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
    if (0 != irq_errors) {
        mali_pp_post_process_job(core);
        MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
        mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
        return;
    } else if (MALI_TRUE == core->core_timed_out) { /* SW timeout */
        if (core->timeout_job_id == mali_pp_job_get_id(core->running_job)) {
            mali_pp_post_process_job(core);
            MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
            mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
        } else {
            mali_group_unlock(core->group);
        }
        core->core_timed_out = MALI_FALSE;
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
        _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
        return;
    } else if (irq_readout & MALI200_REG_VAL_IRQ_HANG) {
        /* Just ignore hang interrupts, the job timer will detect hanging jobs anyway */
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
    }

    /*
     * The only way to get here is if we got a HANG interrupt, which we ignore.
     * Re-enable interrupts and let the core continue to run.
     */
    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
    mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
{
    struct mali_gp_job *job;
    u32 perf_counter_flag;

    job = _mali_osk_malloc(sizeof(struct mali_gp_job));
    if (NULL != job) {
        job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
        if (NULL == job->finished_notification) {
            _mali_osk_free(job);
            return NULL;
        }

        job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
        if (NULL == job->oom_notification) {
            _mali_osk_notification_delete(job->finished_notification);
            _mali_osk_free(job);
            return NULL;
        }

        if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
            _mali_osk_notification_delete(job->finished_notification);
            _mali_osk_notification_delete(job->oom_notification);
            _mali_osk_free(job);
            return NULL;
        }

        perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

        /* case when no counters came from user space
         * so pass the debugfs / DS-5 provided global ones to the job object */
        if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
              (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
            mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
            mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
        }

        _mali_osk_list_init(&job->list);
        job->session = session;
        job->id = id;
        job->heap_current_addr = job->uargs.frame_registers[4];
        job->perf_counter_value0 = 0;
        job->perf_counter_value1 = 0;
        job->pid = _mali_osk_get_pid();
        job->tid = _mali_osk_get_tid();

        job->pp_tracker = pp_tracker;
        if (NULL != job->pp_tracker) {
            /* Take a reference on PP job's tracker that will be released when the GP job is done. */
            mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
        }

        mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
        mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

        return job;
    }

    return NULL;
}
static void mali_pp_bottom_half(void *data)
{
    struct mali_pp_core *core = (struct mali_pp_core *)data;
    u32 irq_readout;
    u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

    mali_group_lock(core->group);

    if (MALI_FALSE == mali_group_power_is_on(core->group)) {
        MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
        mali_group_unlock(core->group);
        return;
    }

    irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
    MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

    if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME) {
        mali_pp_post_process_job(core);
        MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
        mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED);
        return;
    }

    irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
    if (0 != irq_errors) {
        mali_pp_post_process_job(core);
        MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
        mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED);
        return;
    } else if (MALI_TRUE == core->core_timed_out) {
        if (core->timeout_job_id == mali_pp_job_get_id(core->running_job)) {
            mali_pp_post_process_job(core);
            MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
            mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT);
        } else {
            mali_group_unlock(core->group);
        }
        core->core_timed_out = MALI_FALSE;
        return;
    } else if (irq_readout & MALI200_REG_VAL_IRQ_HANG) {
        mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
    }

    mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
    mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
    _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE,
                                  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}