_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
	/* Receive a vsync begin/end wait notification from user space. */
	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
	MALI_IGNORE(event); /* event is not used for release code, and that is OK */

#if defined(CONFIG_MALI400_PROFILING)
	/*
	 * Manually generate user space events in kernel space.
	 * This saves user space from calling kernel space twice in this case.
	 * We just need to remember to add pid and tid manually.
	 */
	switch (event) {
	case _MALI_UK_VSYNC_EVENT_BEGIN_WAIT:
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
					      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
		break;
	case _MALI_UK_VSYNC_EVENT_END_WAIT:
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
					      MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
					      _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
		break;
	default:
		/* Unknown event values are ignored, matching release behavior. */
		break;
	}
#endif

	MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
	MALI_SUCCESS;
}
void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
{
	/* Record the calling thread as the owner of the lock being taken. */
	checker->owner = _mali_osk_get_tid();

#ifdef LOCK_ORDER_CHECKING
	{
		/* Ordered locks are validated against the per-thread lock log. */
		u32 tid = _mali_osk_get_tid();

		if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED) &&
		    !add_lock_to_log_and_check(checker, tid)) {
			printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
			       tid, checker);
			dump_stack();
		}
	}
#endif
}
MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
{
	/* Release the PP scheduler lock; only the owning thread may do this. */
	MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
	MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock\n"));
	MALI_DEBUG_CODE(pp_scheduler_lock_owner = 0); /* Clear ownership before releasing. */
	_mali_osk_lock_signal(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
}
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
{
	/* Allocate and populate a GP job object from the user space start-job arguments.
	 * Returns NULL on allocation failure; caller owns the returned job. */
	struct mali_gp_job *job = _mali_osk_malloc(sizeof(struct mali_gp_job));

	if (NULL == job) {
		return NULL;
	}

	_mali_osk_list_init(&job->list);
	job->session = session;
	job->id = id;
	job->user_id = args->user_job_ptr;
	_mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
	/* Frame register 4 holds the initial heap address (see heap_current_addr). */
	job->heap_current_addr = args->frame_registers[4];
	job->perf_counter_flag = args->perf_counter_flag;
	job->perf_counter_src0 = args->perf_counter_src0;
	job->perf_counter_src1 = args->perf_counter_src1;
	job->perf_counter_value0 = 0;
	job->perf_counter_value1 = 0;
	job->pid = _mali_osk_get_pid();
	job->tid = _mali_osk_get_tid();
	job->frame_builder_id = args->frame_builder_id;
	job->flush_id = args->flush_id;

	return job;
}
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
	/* Record a user space profiling event; pid/tid are filled in kernel side. */
	u32 pid = _mali_osk_get_pid();
	u32 tid = _mali_osk_get_tid();

	_mali_osk_profiling_add_event(args->event_id, pid, tid,
				      args->data[2], args->data[3], args->data[4]);

	return _MALI_OSK_ERR_OK;
}
/**
 * Kernel-side entry point for adding a profiling event on behalf of user space.
 *
 * @param args Event id and data payload received from user space.
 * @return Always _MALI_OSK_ERR_OK.
 */
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
	/* Always add process and thread identificator in the first two data elements for events from user space */
	_mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);

	return _MALI_OSK_ERR_OK;
}
MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
{
	/* Release the soft job system spinlock; caller must be the current owner. */
	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
	MALI_DEBUG_CODE(system->lock_owner = 0); /* Clear ownership before unlocking. */
	MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
	_mali_osk_spinlock_irq_unlock(system->lock);
}
MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
{
	/* Take the soft job system spinlock and record ownership for debug checks. */
	MALI_DEBUG_ASSERT_POINTER(system);

	_mali_osk_spinlock_irq_lock(system->lock);

	MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
	MALI_DEBUG_ASSERT(0 == system->lock_owner); /* Lock must not already be owned. */
	MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
}
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id)
{
	/* Allocate a GP job, create its completion/OOM notifications, and copy the
	 * start-job arguments from user space.
	 *
	 * @param session Owning session the job belongs to.
	 * @param uargs   User space pointer to the start-job arguments.
	 * @param id      Kernel-assigned job id.
	 * @return New job (caller owns it), or NULL on allocation/copy failure.
	 */
	struct mali_gp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL == job) {
		return NULL;
	}

	job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
	if (NULL == job->finished_notification) {
		goto fail_free_job;
	}

	job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
	if (NULL == job->oom_notification) {
		goto fail_delete_finished;
	}

	/* Use the OSK wrapper for the user copy, consistent with the other job
	 * create functions in this driver (raw copy_from_user bypasses the OS
	 * abstraction layer). */
	if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
		goto fail_delete_oom;
	}

	perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

	/* case when no counters came from user space
	 * so pass the debugfs / DS-5 provided global ones to the job object */
	if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
	      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
		mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
		mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
	}

	_mali_osk_list_init(&job->list);
	job->session = session;
	job->id = id;
	/* Frame register 4 holds the initial heap address. */
	job->heap_current_addr = job->uargs.frame_registers[4];
	job->perf_counter_value0 = 0;
	job->perf_counter_value1 = 0;
	job->pid = _mali_osk_get_pid();
	job->tid = _mali_osk_get_tid();

	return job;

	/* goto-based cleanup: release resources in reverse acquisition order. */
fail_delete_oom:
	_mali_osk_notification_delete(job->oom_notification);
fail_delete_finished:
	_mali_osk_notification_delete(job->finished_notification);
fail_free_job:
	_mali_osk_free(job);
	return NULL;
}
void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker) { #ifdef LOCK_ORDER_CHECKING if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) { remove_lock_from_log(checker, _mali_osk_get_tid()); } #endif checker->owner = 0; }
MALI_STATIC_INLINE void mali_pp_scheduler_lock(void) { if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW)) { /* Non-interruptable lock failed: this should never happen. */ MALI_DEBUG_ASSERT(0); } MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n")); MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner); MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid()); }
/* Deferred (bottom half) interrupt handler for a PP core.
 *
 * Reads the raw interrupt status and dispatches exactly one outcome:
 * job completed, job failed (unexpected IRQ bits), SW timeout, or HANG
 * (cleared and ignored — the job timer catches truly hung jobs).
 * The group lock is taken here; on the completion/failure/timeout paths it
 * is released inside mali_group_bottom_half(), otherwise it is released
 * explicitly before returning. */
static void mali_pp_bottom_half(void *data)
{
	struct mali_pp_core *core = (struct mali_pp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		/* Spurious bottom half while the core is powered down; nothing to do. */
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}

	/* Only the interrupt bits this driver uses are considered. */
	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
	{
		/* Normal completion path. */
		mali_pp_post_process_job(core);
		MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}

	/*
	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
	 * END_OF_FRAME and HANG interrupts are not considered error.
	 */
	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
	if (0 != irq_errors)
	{
		/* Any other asserted bit means the job failed; abort it. */
		mali_pp_post_process_job(core);
		MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only handle the timeout if it matches the job currently running;
		 * otherwise the timed-out job already completed. */
		if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
		{
			mali_pp_post_process_job(core);
			MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
			mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
		}
		else
		{
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}
	else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
	{
		/* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
	}

	/*
	 * The only way to get here is if we got a HANG interrupt, which we ignore.
	 * Re-enable interrupts and let core continue to run
	 */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}
/* Allocate a PP job, copy the start-job arguments from user space, validate
 * them and initialize bookkeeping fields.
 *
 * @param session Owning session.
 * @param uargs   User space pointer to the start-job arguments.
 * @param id      Kernel-assigned job id.
 * @return New job (caller owns it), or NULL on allocation/copy/validation failure.
 */
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_malloc(sizeof(struct mali_pp_job));
	if (NULL != job)
	{
		u32 i;

		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s)))
		{
			_mali_osk_free(job);
			return NULL;
		}

		/* Validate the user-supplied core count before it is used below. */
		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS)
		{
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			_mali_osk_free(job);
			return NULL;
		}

		/* Only create a finished notification if user space asked for one. */
		if (!mali_pp_job_use_no_notification(job))
		{
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification)
			{
				_mali_osk_free(job);
				return NULL;
			}
		}
		else
		{
			job->finished_notification = NULL;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)))
		{
			mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
			mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;

		/* Zero the per-sub-job performance counter values. */
		for (i = 0; i < job->uargs.num_cores; i++)
		{
			job->perf_counter_value0[i] = 0;
			job->perf_counter_value1[i] = 0;
		}
		/* num_cores == 0 means "one sub job" by convention. */
		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->sub_jobs_started = 0;
		job->sub_jobs_completed = 0;
		job->sub_job_errors = 0;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();
#if defined(MTK_CONFIG_SYNC)
		/* Sync-fence related state starts out empty. */
		job->sync_point = NULL;
		job->pre_fence = NULL;
		job->sync_work = NULL;
#endif

		return job;
	}

	return NULL;
}
/**
 * Check if fence has been signaled.
 *
 * @param system Timeline system.
 * @param fence Timeline fence.
 * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
 */
static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
{
	int i;
	u32 tid = _mali_osk_get_tid();
	mali_bool ret = MALI_TRUE;
#if defined(CONFIG_SYNC)
	struct sync_fence *sync_fence = NULL;
#endif

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	/* Reentrant spinlock keyed on tid protects the timeline state below. */
	mali_spinlock_reentrant_wait(system->spinlock, tid);

	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
		struct mali_timeline *timeline;
		mali_timeline_point point;

		point = fence->points[i];

		if (likely(MALI_TIMELINE_NO_POINT == point)) {
			/* Fence contains no point on this timeline. */
			continue;
		}

		timeline = system->timelines[i];
		MALI_DEBUG_ASSERT_POINTER(timeline);

		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
		}

		if (!mali_timeline_is_point_released(timeline, point)) {
			/* At least one point still pending: fence not signaled. */
			ret = MALI_FALSE;
			goto exit;
		}
	}

#if defined(CONFIG_SYNC)
	/* The fence may additionally carry an Android sync fd; check it too. */
	if (-1 != fence->sync_fd) {
		sync_fence = sync_fence_fdget(fence->sync_fd);
		if (likely(NULL != sync_fence)) {
			/* status == 0 means the sync fence is not yet signaled. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
			if (0 == sync_fence->status) {
#else
			if (0 == atomic_read(&sync_fence->status)) {
#endif
				ret = MALI_FALSE;
			}
		} else {
			MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
		}
	}
#endif /* defined(CONFIG_SYNC) */

exit:
	mali_spinlock_reentrant_signal(system->spinlock, tid);

#if defined(CONFIG_SYNC)
	/* Drop the reference taken by sync_fence_fdget, outside the spinlock. */
	if (NULL != sync_fence) {
		sync_fence_put(sync_fence);
	}
#endif /* defined(CONFIG_SYNC) */

	return ret;
}

/* Wait until the given fence is signaled, or until the timeout expires.
 *
 * @param system  Timeline system.
 * @param fence   Fence to wait on.
 * @param timeout IMMEDIATELY (poll only), NEVER (wait forever), or a timeout value.
 * @return MALI_TRUE if the fence was signaled, MALI_FALSE on timeout/failure.
 */
mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
{
	struct mali_timeline_fence_wait_tracker *wait;
	mali_timeline_point point;
	mali_bool ret;

	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT_POINTER(fence);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));

	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
		/* Pure poll: no tracker needed. */
		return mali_timeline_fence_wait_check_status(system, fence);
	}

	wait = mali_timeline_fence_wait_tracker_alloc();
	if (unlikely(NULL == wait)) {
		MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
		return MALI_FALSE;
	}

	wait->activated = MALI_FALSE;
	wait->system = system;

	/* Initialize refcount to two references.  The reference first will be released by this
	 * function after the wait is over.  The second reference will be released when the tracker
	 * is activated. */
	_mali_osk_atomic_init(&wait->refcount, 2);

	/* Add tracker to timeline system, but not to a timeline. */
	mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
	point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
	MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
	MALI_IGNORE(point);

	/* Wait for the tracker to be activated or time out. */
	if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
	} else {
		_mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
	}

	/* activated stays MALI_FALSE if the wait timed out. */
	ret = wait->activated;

	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}

	return ret;
}

/* Called when the wait tracker's fence is satisfied: mark it activated, wake
 * any waiter, release the tracker, and drop the activation reference. */
void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
{
	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;

	MALI_DEBUG_ASSERT_POINTER(wait);
	MALI_DEBUG_ASSERT_POINTER(wait->system);

	MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));

	MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
	wait->activated = MALI_TRUE;

	_mali_osk_wait_queue_wake_up(wait->system->wait_queue);

	/* Nothing can wait on this tracker, so nothing to schedule after release. */
	schedule_mask = mali_timeline_tracker_release(&wait->tracker);
	MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
	MALI_IGNORE(schedule_mask);

	if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
		mali_timeline_fence_wait_tracker_free(wait);
	}
}
void _mali_osk_profiling_report_sw_counters(u32 *counters)
{
	/* Forward the software counter array to the tracing backend, tagged with
	 * the reporting process and thread ids. */
	u32 pid = _mali_osk_get_pid();
	u32 tid = _mali_osk_get_tid();

	trace_mali_sw_counters(pid, tid, NULL, counters);
}
MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
{
	/* Debug-build check that the calling thread holds the soft job system lock. */
	MALI_DEBUG_ASSERT_POINTER(system);
	MALI_DEBUG_ASSERT(system->lock_owner == _mali_osk_get_tid());
}
MALI_STATIC_INLINE void mali_pp_scheduler_assert_locked(void)
{
	/* Debug-build check that the calling thread holds the PP scheduler lock. */
	MALI_DEBUG_ASSERT(pp_scheduler_lock_owner == _mali_osk_get_tid());
}
/* Allocate a PP job, copy and validate the user space start-job arguments,
 * and copy in the memory cookie array if one is supplied.
 *
 * Uses calloc so all fields start zeroed, which lets the single `fail` path
 * hand partially-built jobs to mali_pp_job_delete() safely.
 *
 * @param session Owning session.
 * @param uargs   User space pointer to the start-job arguments.
 * @param id      Kernel-assigned job id.
 * @return New job (caller owns it), or NULL on failure.
 */
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job) {
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
			goto fail;
		}

		/* Validate the user-supplied core count before it is used below. */
		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		/* Only create a finished notification if user space asked for one. */
		if (!mali_pp_job_use_no_notification(job)) {
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			mali_pp_job_set_perf_counter_src0(job, mali_pp_job_get_pp_counter_src0());
			mali_pp_job_set_perf_counter_src1(job, mali_pp_job_get_pp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;
		/* num_cores == 0 means "one sub job" by convention. */
		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();
		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0) {
			u32 size;

			/* Cookie count is bounded by the session's descriptor map size. */
			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*job->uargs.memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, job->uargs.memory_cookies, size)) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			/* One attachment slot per memory cookie. */
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		} else {
			job->memory_cookies = NULL;
		}

		return job;
	}

fail:
	/* mali_pp_job_delete handles a partially-initialized (zeroed) job. */
	if (NULL != job) {
		mali_pp_job_delete(job);
	}

	return NULL;
}
/* Deferred (bottom half) interrupt handler for the GP core.
 *
 * Dispatches exactly one outcome per invocation: job completed, job failed
 * (unexpected IRQ bits), SW timeout, PLBU out-of-memory (handled before
 * HANG/completion, see comment below), or a partial END_CMD_LST, in which
 * case the received half is masked and the core keeps running.
 * The group lock taken here is released either by mali_group_bottom_half()
 * or explicitly before returning. */
static void mali_gp_bottom_half(void *data)
{
	struct mali_gp_core *core = (struct mali_gp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
#endif

	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		/* Spurious bottom half while the core is powered down; nothing to do. */
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}

	/* Only the interrupt bits this driver uses are considered. */
	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
	{
		/* Completion requires the core to be idle, not just an END_CMD_LST bit. */
		u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
			return;
		}
	}

	/*
	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
	 */
	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
	if (0 != irq_errors)
	{
		/* Any other asserted bit means the job failed; abort it. */
		mali_gp_post_process_job(core, MALI_FALSE);
		MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only handle the timeout if it matches the job currently running. */
		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
		}
		core->core_timed_out = MALI_FALSE;
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
	{
		/* GP wants more memory in order to continue.
		 *
		 * This must be handled prior to HANG because this actually can
		 * generate a HANG while waiting for more memory.
		 * And it must be handled before the completion interrupts,
		 * since the PLBU can run out of memory after VS is complete;
		 * in which case the OOM must be handled before to complete the
		 * PLBU work.
		 */
		mali_gp_post_process_job(core, MALI_TRUE);
		MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
	{
		/* we mask hang interrupts, so this should never happen... */
		MALI_DEBUG_ASSERT( 0 );
	}

	/* The only way to get here is if we only got one of two needed END_CMD_LST
	 * interrupts. Disable the interrupt that has been received and continue to
	 * run. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
	                            MALIGP2_REG_VAL_IRQ_MASK_USED & ((irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) ? ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST : ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST ));
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
}
/* Deferred (bottom half) interrupt handler for the GP core (earlier driver
 * variant without bottom-half TLP logging).
 *
 * Dispatches one outcome per invocation: job completed, job failed, SW
 * timeout, PLBU out-of-memory, or HANG (cleared and otherwise ignored).
 * The group lock taken here is released either by mali_group_bottom_half()
 * or explicitly before returning. */
static void mali_gp_bottom_half(void *data)
{
	struct mali_gp_core *core = (struct mali_gp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group); /* Released in the group handler or below. */

	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		/* Spurious bottom half while the core is powered down; nothing to do. */
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
		return;
	}

	/* Only the interrupt bits this driver uses are considered. */
	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
	{
		/* Completion requires the core to be idle, not just an END_CMD_LST bit. */
		u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
			return;
		}
	}

	/* Any bit outside END_CMD_LST/HANG/PLBU_OOM is an error. */
	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
	if (0 != irq_errors)
	{
		mali_gp_post_process_job(core, MALI_FALSE);
		MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only handle the timeout if it matches the job currently running. */
		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT);
		}
		else
		{
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out but current job is %d\n", core->timeout_job_id, mali_gp_job_get_id(core->running_job)));
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
	{
		/* GP wants more heap memory before it can continue. */
		mali_gp_post_process_job(core, MALI_TRUE);
		MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
	{
		/* Hang is acknowledged but otherwise ignored here. */
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
	}

	/* Re-enable all used interrupts and let the core continue running. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif
}
/* Allocate a PP job from user space start-job arguments (timeline-enabled
 * driver variant): copies and validates uargs, applies global debugfs/DS-5
 * performance counters when user space supplied none, copies in the memory
 * cookie array, prepares a DMA command buffer for virtual group jobs, and
 * initializes the job's timeline tracker and fence.
 *
 * Uses calloc so all fields start zeroed, which lets the single `fail` path
 * hand partially-built jobs to mali_pp_job_delete() safely.
 *
 * @param session Owning session.
 * @param uargs   User space pointer to the start-job arguments.
 * @param id      Kernel-assigned job id.
 * @return New job (caller owns it), or NULL on failure.
 */
struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s __user *uargs, u32 id)
{
	struct mali_pp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
	if (NULL != job) {
		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
			goto fail;
		}

		/* Validate the user-supplied core count before it is used below. */
		if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
			MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
			goto fail;
		}

		/* Only create a finished notification if user space asked for one. */
		if (!mali_pp_job_use_no_notification(job)) {
			job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
			if (NULL == job->finished_notification) goto fail;
		}

		perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);

			/* These counters apply for all virtual jobs, and where no per sub job counter is specified */
			job->uargs.perf_counter_src0 = pp_counter_src0;
			job->uargs.perf_counter_src1 = pp_counter_src1;

			/* We only copy the per sub job array if it is enabled with at least one counter */
			if (0 < sub_job_count) {
				job->perf_counter_per_sub_job_count = sub_job_count;
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
				_mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
			}
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		_mali_osk_list_init(&job->session_list);
		job->id = id;
		/* num_cores == 0 means "one sub job" by convention. */
		job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->num_memory_cookies = job->uargs.num_memory_cookies;
		if (job->num_memory_cookies > 0) {
			u32 size;
			/* memory_cookies is carried as an integer in uargs; rebuild the user pointer. */
			u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;

			/* Cookie count is bounded by the session's descriptor map size. */
			if (job->uargs.num_memory_cookies > session->descriptor_mapping->current_nr_mappings) {
				MALI_PRINT_ERROR(("Mali PP job: Too many memory cookies specified in job object\n"));
				goto fail;
			}

			size = sizeof(*memory_cookies) * job->num_memory_cookies;

			job->memory_cookies = _mali_osk_malloc(size);
			if (NULL == job->memory_cookies) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
				goto fail;
			}

			if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
				goto fail;
			}

#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
			/* One attachment slot per memory cookie. */
			job->num_dma_bufs = job->num_memory_cookies;
			job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
			if (NULL == job->dma_bufs) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
				goto fail;
			}
#endif
		}

		/* Prepare DMA command buffer to start job, if it is virtual. */
		if (mali_pp_job_is_virtual_group_job(job)) {
			struct mali_pp_core *core;
			_mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);

			if (_MALI_OSK_ERR_OK != err) {
				MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
				goto fail;
			}

			core = mali_pp_scheduler_get_virtual_pp();
			MALI_DEBUG_ASSERT_POINTER(core);

			mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
		}

		if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
			/* Not a valid job. */
			goto fail;
		}

		/* Hook the job into the timeline system and copy its fence from uargs. */
		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

fail:
	/* mali_pp_job_delete handles a partially-initialized (zeroed) job. */
	if (NULL != job) {
		mali_pp_job_delete(job);
	}

	return NULL;
}
/* Allocate a GP job (timeline-enabled driver variant): creates completion and
 * OOM notifications, copies the start-job arguments from user space, applies
 * global debugfs/DS-5 performance counters when user space supplied none, and
 * initializes the job's timeline tracker and fence.
 *
 * @param session    Owning session.
 * @param uargs      User space pointer to the start-job arguments.
 * @param id         Kernel-assigned job id.
 * @param pp_tracker Optional PP job tracker to hold referenced until this GP
 *                   job is done (may be NULL).
 * @return New job (caller owns it), or NULL on failure.
 */
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
{
	struct mali_gp_job *job;
	u32 perf_counter_flag;

	job = _mali_osk_malloc(sizeof(struct mali_gp_job));
	if (NULL != job) {
		job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
		if (NULL == job->finished_notification) {
			_mali_osk_free(job);
			return NULL;
		}

		job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
		if (NULL == job->oom_notification) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_free(job);
			return NULL;
		}

		if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
			_mali_osk_notification_delete(job->finished_notification);
			_mali_osk_notification_delete(job->oom_notification);
			_mali_osk_free(job);
			return NULL;
		}

		perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);

		/* case when no counters came from user space
		 * so pass the debugfs / DS-5 provided global ones to the job object */
		if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
		      (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
			mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
			mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
		}

		_mali_osk_list_init(&job->list);
		job->session = session;
		job->id = id;
		/* Frame register 4 holds the initial heap address. */
		job->heap_current_addr = job->uargs.frame_registers[4];
		job->perf_counter_value0 = 0;
		job->perf_counter_value1 = 0;
		job->pid = _mali_osk_get_pid();
		job->tid = _mali_osk_get_tid();

		job->pp_tracker = pp_tracker;
		if (NULL != job->pp_tracker) {
			/* Take a reference on PP job's tracker that will be released when the GP
			   job is done. */
			mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
		}

		/* Hook the job into the timeline system and copy its fence from uargs. */
		mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
		mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));

		return job;
	}

	return NULL;
}
/* Deferred (bottom half) interrupt handler for a PP core (earlier driver
 * variant without bottom-half TLP logging).
 *
 * Dispatches one outcome per invocation: job completed, job failed, SW
 * timeout, or HANG (cleared and otherwise ignored — the job timer catches
 * truly hung jobs). The group lock taken here is released either by
 * mali_group_bottom_half() or explicitly before returning. */
static void mali_pp_bottom_half(void *data)
{
	struct mali_pp_core *core = (struct mali_pp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group); /* Released in the group handler or below. */

	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		/* Spurious bottom half while the core is powered down; nothing to do. */
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
		return;
	}

	/* Only the interrupt bits this driver uses are considered. */
	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
	{
		/* Normal completion path. */
		mali_pp_post_process_job(core);
		MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
		return;
	}

	/* Any bit outside END_OF_FRAME/HANG is an error. */
	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
	if (0 != irq_errors)
	{
		mali_pp_post_process_job(core);
		MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only handle the timeout if it matches the job currently running. */
		if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
		{
			mali_pp_post_process_job(core);
			MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
			mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
		}
		else
		{
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
		return;
	}
	else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
	{
		/* Hang is acknowledged but otherwise ignored here. */
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
	}

	/* Re-enable all used interrupts and let the core continue running. */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0  /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE ,  _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}