/**
 * Handle user-space response to a suspended (out-of-memory) GP job.
 *
 * @param args User/kernel argument struct; args->ctx must hold the session,
 *             args->cookie identifies the suspended job, args->code selects
 *             resume-with-new-heap vs. abort, args->arguments[0..1] are the
 *             new heap start/end addresses on resume.
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_INVALID_ARGS for a NULL
 *         context, _MALI_OSK_ERR_FAULT on allocation failure or if the job
 *         could not be resumed.
 *
 * Fix: the original cast args->ctx to a session pointer and re-checked it for
 * NULL even though args->ctx had already been verified non-NULL — the check
 * was unreachable and the variable unused, so both are removed.
 */
_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
{
	struct mali_gp_job *resumed_job;
	_mali_osk_notification_t *new_notification = NULL;

	MALI_DEBUG_ASSERT_POINTER(args);

	if (NULL == args->ctx)
	{
		return _MALI_OSK_ERR_INVALID_ARGS;
	}

	if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
	{
		/* Pre-allocate the OOM notification before taking the group lock;
		 * if allocation fails we cannot resume, so abort the job instead. */
		new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
		if (NULL == new_notification)
		{
			MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
			mali_group_lock(slot.group);
			mali_group_abort_gp_job(slot.group, args->cookie);
			mali_group_unlock(slot.group);
			return _MALI_OSK_ERR_FAULT;
		}
	}

	mali_group_lock(slot.group);

	if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
	{
		MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));

		resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
		if (NULL != resumed_job)
		{
			/* The resumed job takes ownership of the notification. */
			resumed_job->oom_notification = new_notification;
			mali_group_unlock(slot.group);
			return _MALI_OSK_ERR_OK;
		}
		else
		{
			/* Job was gone (aborted/completed); release our notification. */
			mali_group_unlock(slot.group);
			_mali_osk_notification_delete(new_notification);
			return _MALI_OSK_ERR_FAULT;
		}
	}

	MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
	mali_group_abort_gp_job(slot.group, args->cookie);
	mali_group_unlock(slot.group);
	return _MALI_OSK_ERR_OK;
}
static void mali_gp_scheduler_schedule(void) { struct mali_gp_job *job; mali_gp_scheduler_lock(); if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue)) { MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n", pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0)); mali_gp_scheduler_unlock(); return; /* Nothing to do, so early out */ } /* Get (and remove) next job in queue */ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list); _mali_osk_list_del(&job->list); /* Mark slot as busy */ slot.state = MALI_GP_SLOT_STATE_WORKING; mali_gp_scheduler_unlock(); MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job)); mali_group_lock(slot.group); if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job)) { MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n")); MALI_DEBUG_ASSERT(0); /* this cant fail on Mali-300+, no need to implement put back of job */ } mali_group_unlock(slot.group); }
static void mali_mmu_bottom_half(void * data) { struct mali_mmu_core *mmu = (struct mali_mmu_core*)data; u32 raw, status, fault_address; MALI_DEBUG_ASSERT_POINTER(mmu); MALI_DEBUG_PRINT(2, ("MMU Page fault bottom half: Locking subsystems\n")); mali_group_lock(mmu->group); /* Unlocked in mali_group_bottom_half */ raw = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT); status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) ) { MALI_DEBUG_PRINT(1, ("MMU: Page fault bottom half: No Irq found.\n")); mali_group_unlock(mmu->group); return; } /* An actual page fault has occurred. */ fault_address = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR); MALI_PRINT(("Page fault detected at 0x%x from bus id %d of type %s on %s\n", (void*)fault_address, (status >> 6) & 0x1F, (status & 32) ? "write" : "read", mmu->hw_core.description)); mali_group_bottom_half(mmu->group, GROUP_EVENT_MMU_PAGE_FAULT); /* Unlocks the group lock */ }
void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session) { struct mali_gp_job *gp_job; struct mali_pp_job *pp_job; u32 gp_job_id = 0; u32 pp_job_id = 0; mali_bool abort_pp = MALI_FALSE; mali_bool abort_gp = MALI_FALSE; mali_group_lock(group); gp_job = group->gp_running_job; pp_job = group->pp_running_job; if (gp_job && mali_gp_job_get_session(gp_job) == session) { MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session)); gp_job_id = mali_gp_job_get_id(gp_job); abort_gp = MALI_TRUE; } if (pp_job && mali_pp_job_get_session(pp_job) == session) { MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session)); pp_job_id = mali_pp_job_get_id(pp_job); abort_pp = MALI_TRUE; } mali_group_unlock(group); /* These functions takes and releases the group lock */ if (0 != abort_gp) { mali_group_abort_gp_job(group, gp_job_id); } if (0 != abort_pp) { mali_group_abort_pp_job(group, pp_job_id); } mali_group_lock(group); mali_group_remove_session_if_unused(group, session); mali_group_unlock(group); }
/* Abort the PP job with the given ID if (and only if) it is the one currently
 * running on this group. mali_group_complete_jobs releases the group lock. */
void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
{
	mali_group_lock(group);

	if (MALI_GROUP_CORE_STATE_IDLE != group->pp_state && job_id == mali_pp_job_get_id(group->pp_running_job))
	{
		mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* Will release group lock */
	}
	else
	{
		/* No need to cancel or job has already been aborted or completed */
		mali_group_unlock(group);
	}
}
static void mali_mmu_bottom_half(void * data) { struct mali_mmu_core *mmu = (struct mali_mmu_core*)data; u32 raw, status, fault_address; MALI_DEBUG_ASSERT_POINTER(mmu); MALI_DEBUG_PRINT(3, ("Mali MMU: Page fault bottom half: Locking subsystems\n")); #ifdef CONFIG_ARCH_MESON6 if (mmu->id == 2) { malifix_set_mmu_int_process_state(0, MMU_INT_NONE); } else if (mmu->id == 3) { malifix_set_mmu_int_process_state(1, MMU_INT_NONE); } #endif mali_group_lock(mmu->group); /* Unlocked in mali_group_bottom_half */ if ( MALI_FALSE == mali_group_power_is_on(mmu->group) ) { MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.",mmu->hw_core.description)); mali_group_unlock(mmu->group); return; } raw = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT); status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) ) { MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault bottom half: No Irq found.\n")); mali_group_unlock(mmu->group); /* MALI_DEBUG_ASSERT(0); */ return; } /* An actual page fault has occurred. */ fault_address = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR); MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n", (void*)fault_address, (status >> 6) & 0x1F, (status & 32) ? "write" : "read", mmu->hw_core.description)); mali_group_bottom_half(mmu->group, GROUP_EVENT_MMU_PAGE_FAULT); /* Unlocks the group lock */ }
void mali_group_power_on(void) { int i; for (i = 0; i < mali_global_num_groups; i++) { struct mali_group *group = mali_global_groups[i]; mali_group_lock(group); MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state); MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state); MALI_DEBUG_ASSERT_POINTER(group->cluster); group->power_is_on = MALI_TRUE; mali_cluster_power_is_enabled_set(group->cluster, MALI_TRUE); mali_group_unlock(group); } MALI_DEBUG_PRINT(3,("group: POWER ON\n")); }
/* Resume a GP job that stalled on PLBU heap exhaustion, giving it a new heap
 * [start_addr, end_addr). Ignored unless the group is in OOM state for the
 * exact job identified by job_id. */
void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
	mali_group_lock(group);

	if (MALI_GROUP_CORE_STATE_OOM != group->gp_state || job_id != mali_gp_job_get_id(group->gp_running_job))
	{
		mali_group_unlock(group);
		return; /* Illegal request or job has already been aborted */
	}

	/* Make sure caches are coherent before the core touches the new heap. */
	mali_cluster_l2_cache_invalidate_all_force(group->cluster);
	mali_mmu_zap_tlb_without_stall(group->mmu);

	mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
	group->gp_state = MALI_GROUP_CORE_STATE_WORKING;

	mali_group_unlock(group);
}
/* Start a PP (sub-)job on this group's PP core.
 *
 * @param group   Group whose PP core must be idle (asserted).
 * @param job     PP job to start; its session's page directory is activated.
 * @param sub_job Sub-job index within the PP job.
 * @return _MALI_OSK_ERR_OK when the job was started,
 *         _MALI_OSK_ERR_FAULT when page-directory activation failed.
 *
 * Note the ordering: the PM start event is raised before taking the group
 * lock, and is "cancelled" with a matching stop event on the failure path. */
_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
	struct mali_session_data *session;
	enum mali_group_activate_pd_status activate_status;

	MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);

	mali_pm_core_event(MALI_CORE_EVENT_PP_START);

	session = mali_pp_job_get_session(job);

	mali_group_lock(group);

	mali_cluster_l2_cache_invalidate_all(group->cluster, mali_pp_job_get_id(job));

	activate_status = mali_group_activate_page_directory(group, session);
	if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
	{
		/* if session is NOT kept Zapping is done as part of session switch */
		if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
		{
			MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
			mali_mmu_zap_tlb_without_stall(group->mmu);
		}

		/* Hand the job to hardware and record it as running. */
		mali_pp_job_start(group->pp_core, job, sub_job);
		group->pp_running_job = job;
		group->pp_running_sub_job = sub_job;
		group->pp_state = MALI_GROUP_CORE_STATE_WORKING;

		mali_group_unlock(group);
		return _MALI_OSK_ERR_OK;
	}

#if defined(USING_MALI200)
	group->pagedir_activation_failed = MALI_TRUE;
#endif

	mali_group_unlock(group);

	mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_PP_START */
	return _MALI_OSK_ERR_FAULT;
}
void mali_group_power_off(void) { int i; /* It is necessary to set group->session = NULL; so that the powered off MMU is not written to on map /unmap */ /* It is necessary to set group->power_is_on=MALI_FALSE so that pending bottom_halves does not access powered off cores. */ for (i = 0; i < mali_global_num_groups; i++) { struct mali_group *group = mali_global_groups[i]; mali_group_lock(group); MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state); MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state); MALI_DEBUG_ASSERT_POINTER(group->cluster); group->session = NULL; MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); group->power_is_on = MALI_FALSE; mali_cluster_power_is_enabled_set(group->cluster, MALI_FALSE); mali_group_unlock(group); } MALI_DEBUG_PRINT(3,("group: POWER OFF\n")); }
/* Called from mali_cluster_reset() when the system is re-turned on.
 * Drops the cached session and resets each core the group actually has,
 * in the order MMU, GP, PP. */
void mali_group_reset(struct mali_group *group)
{
	mali_group_lock(group);

	group->session = NULL;

	if (group->mmu != NULL)
	{
		mali_mmu_reset(group->mmu);
	}

	if (group->gp_core != NULL)
	{
		mali_gp_reset(group->gp_core);
	}

	if (group->pp_core != NULL)
	{
		mali_pp_reset(group->pp_core);
	}

	mali_group_unlock(group);
}
/* Bottom half for PP core interrupts.
 * Takes the group lock; on every job-terminating path the lock is handed to
 * mali_group_bottom_half (which releases it), otherwise it is released here
 * after re-enabling interrupts. Branch order matters: completion, then
 * unexpected-error IRQs, then SW timeout, then (ignored) HANG. */
static void mali_pp_bottom_half(void *data)
{
	struct mali_pp_core *core = (struct mali_pp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group);

	/* Never touch registers of a powered-off core. */
	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
		return;
	}

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
	{
		/* Normal completion. */
		mali_pp_post_process_job(core);
		MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
		return;
	}

	/* Any IRQ bit other than END_OF_FRAME or HANG is treated as an error. */
	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
	if (0 != irq_errors)
	{
		mali_pp_post_process_job(core);
		MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only act if the timed-out job is still the running one. */
		if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
		{
			mali_pp_post_process_job(core);
			MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
			mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
		}
		else
		{
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
		return;
	}
	else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
	{
		/* HANG is ignored (the SW job timer catches real hangs); just clear it. */
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
	}

	/* Re-enable interrupts and let the core continue. */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}
/* One-time initialization of the PP scheduler.
 * Creates the scheduler lock, wait queue and work queues, then enumerates all
 * groups: physical PP groups are either joined into a virtual group (if one
 * exists) or placed on the idle list. On any allocation failure, everything
 * created so far is torn down in reverse order and _MALI_OSK_ERR_NOMEM is
 * returned. */
_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
{
	struct mali_group *group;
	struct mali_pp_core *pp_core;
	_mali_osk_lock_flags_t lock_flags;
	u32 num_groups;
	u32 i;

	/* IRQ-safe spinlock only needed when scheduling from the upper half. */
#if defined(MALI_UPPER_HALF_SCHEDULING)
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#else
	lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#endif

	_MALI_OSK_INIT_LIST_HEAD(&job_queue);
	_MALI_OSK_INIT_LIST_HEAD(&group_list_working);
	_MALI_OSK_INIT_LIST_HEAD(&group_list_idle);
	_MALI_OSK_INIT_LIST_HEAD(&virtual_job_queue);

	pp_scheduler_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
	if (NULL == pp_scheduler_lock)
	{
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
	if (NULL == pp_scheduler_working_wait_queue)
	{
		/* Tear down what was created above. */
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_wq_schedule = _mali_osk_wq_create_work(mali_pp_scheduler_do_schedule, NULL);
	if (NULL == pp_scheduler_wq_schedule)
	{
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

#if defined(MALI_PP_SCHEDULER_USE_DEFERRED_JOB_DELETE)
	pp_scheduler_wq_job_delete = _mali_osk_wq_create_work(mali_pp_scheduler_do_job_delete, NULL);
	if (NULL == pp_scheduler_wq_job_delete)
	{
		_mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}

	pp_scheduler_job_delete_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
	if (NULL == pp_scheduler_job_delete_lock)
	{
		_mali_osk_wq_delete_work(pp_scheduler_wq_job_delete);
		_mali_osk_wq_delete_work(pp_scheduler_wq_schedule);
		_mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
		_mali_osk_lock_term(pp_scheduler_lock);
		return _MALI_OSK_ERR_NOMEM;
	}
#endif

	num_groups = mali_group_get_glob_num_groups();

	/* Do we have a virtual group? */
	for (i = 0; i < num_groups; i++)
	{
		group = mali_group_get_glob_group(i);
		if (mali_group_is_virtual(group))
		{
			MALI_DEBUG_PRINT(3, ("Found virtual group %p\n", group));
			virtual_group = group;
			break;
		}
	}

	/* Find all the available PP cores */
	for (i = 0; i < num_groups; i++)
	{
		group = mali_group_get_glob_group(i);
		pp_core = mali_group_get_pp_core(group);
		if (NULL != pp_core && !mali_group_is_virtual(group))
		{
			if (0 == pp_version)
			{
				/* Retrieve PP version from the first available PP core */
				pp_version = mali_pp_core_get_version(pp_core);
			}

			if (NULL != virtual_group)
			{
				/* Add all physical PP cores to the virtual group */
				mali_group_lock(virtual_group);
				group->state = MALI_GROUP_STATE_JOINING_VIRTUAL;
				mali_group_add_group(virtual_group, group);
				mali_group_unlock(virtual_group);
			}
			else
			{
				_mali_osk_list_add(&group->pp_scheduler_list, &group_list_idle);
			}

			num_cores++;
		}
	}

	return _MALI_OSK_ERR_OK;
}
/* Create and register a PP core object for the given HW resource.
 *
 * @param resource HW resource (base address, IRQ, description).
 * @param group    Group this PP core belongs to.
 * @return the new core on success, NULL on any failure.
 *
 * On failure, resources are released in reverse order of acquisition
 * (timer -> IRQ -> hw_core -> core allocation); the nested if/else structure
 * implements that unwinding without goto. */
struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
{
	struct mali_pp_core* core = NULL;

	MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
	MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));

	if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES)
	{
		MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
		return NULL;
	}

	core = _mali_osk_malloc(sizeof(struct mali_pp_core));
	if (NULL != core)
	{
		core->group = group;
		core->core_id = mali_global_num_pp_cores;
		core->running_job = NULL;
		/* No performance counters active until explicitly configured. */
		core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
		core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
		core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
		core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;

		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK))
		{
			_mali_osk_errcode_t ret;

			/* Reset the core under the group lock before exposing it. */
			mali_group_lock(group);
			ret = mali_pp_reset(core);
			mali_group_unlock(group);

			if (_MALI_OSK_ERR_OK == ret)
			{
				/* Setup IRQ handlers (which will do IRQ probing if needed) */
				core->irq = _mali_osk_irq_init(resource->irq, mali_pp_upper_half, mali_pp_bottom_half, mali_pp_irq_probe_trigger, mali_pp_irq_probe_ack, core, "mali_pp_irq_handlers");
				if (NULL != core->irq)
				{
					/* Initialise the timeout timer */
					core->timeout_timer = _mali_osk_timer_init();
					if(NULL != core->timeout_timer)
					{
						_mali_osk_timer_setcallback(core->timeout_timer, mali_pp_timeout, (void *)core);

						/* Success: register in the global core table. */
						mali_global_pp_cores[mali_global_num_pp_cores] = core;
						mali_global_num_pp_cores++;
						return core;
					}
					else
					{
						MALI_PRINT_ERROR(("Failed to setup timeout timer for PP core %s\n", core->hw_core.description));
						/* Release IRQ handlers */
						_mali_osk_irq_term(core->irq);
					}
				}
				else
				{
					MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
				}
			}
			mali_hw_core_delete(&core->hw_core);
		}
		_mali_osk_free(core);
	}
	else
	{
		MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
	}

	return NULL;
}
/* Bottom half for PP core interrupts (profiling-instrumented variant).
 * Takes the group lock; on every job-terminating path the lock is handed to
 * mali_group_bottom_half (which releases it), otherwise it is released here
 * after re-enabling interrupts. Branch order matters: completion, then
 * unexpected-error IRQs, then SW timeout, then (ignored) HANG. */
static void mali_pp_bottom_half(void *data)
{
	struct mali_pp_core *core = (struct mali_pp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */

	/* Never touch registers of a powered-off core. */
	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
	{
		/* Normal completion. */
		mali_pp_post_process_job(core);
		MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}

	/*
	 * Now lets look at the possible error cases (IRQ indicating error or timeout)
	 * END_OF_FRAME and HANG interrupts are not considered error.
	 */
	irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
	if (0 != irq_errors)
	{
		mali_pp_post_process_job(core);
		MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}
	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
	{
		/* Only act if the timed-out job is still the running one. */
		if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
		{
			mali_pp_post_process_job(core);
			MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n", mali_pp_job_get_id(core->running_job), core->hw_core.description));
			mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
		}
		else
		{
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
		return;
	}
	else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
	{
		/* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
	}

	/*
	 * The only way to get here is if we got a HANG interrupt, which we ignore.
	 * Re-enable interrupts and let core continue to run
	 */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0 /* Bottom half TLP logging is currently not supported */
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
#endif
#endif
}
/* Bottom half for GP core interrupts.
 * Takes the group lock; on every job-terminating path the lock is handed to
 * mali_group_bottom_half (which releases it), otherwise it is released here.
 * Branch order: completion (both command lists done and core inactive), then
 * unexpected-error IRQs, then SW timeout, then PLBU out-of-memory, then
 * (ignored) HANG. */
static void mali_gp_bottom_half(void *data)
{
	struct mali_gp_core *core = (struct mali_gp_core *)data;
	u32 irq_readout;
	u32 irq_errors;

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif

	mali_group_lock(core->group);

	/* Never touch registers of a powered-off core. */
	if ( MALI_FALSE == mali_group_power_is_on(core->group) )
	{
		MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
		mali_group_unlock(core->group);
		return;
	}

	irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;

	MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));

	if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
	{
		u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
		/* Only treat as complete when the core is no longer active. */
		if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
			return;
		}
	}

	/* Any IRQ bit other than END_CMD_LST, HANG or PLBU_OOM is an error. */
	irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
	if (0 != irq_errors)
	{
		mali_gp_post_process_job(core, MALI_FALSE);
		MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
		return;
	}
	else if (MALI_TRUE == core->core_timed_out)
	{
		/* Only act if the timed-out job is still the running one. */
		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
		{
			mali_gp_post_process_job(core, MALI_FALSE);
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT); /* Will release group lock */
		}
		else
		{
			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out but current job is %d\n", core->timeout_job_id, mali_gp_job_get_id(core->running_job)));
			mali_group_unlock(core->group);
		}
		core->core_timed_out = MALI_FALSE;
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
	{
		/* PLBU ran out of heap; let the group handler suspend the job. */
		mali_gp_post_process_job(core, MALI_TRUE);
		MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
		mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
		return;
	}
	else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
	{
		/* HANG is ignored (the SW job timer catches real hangs); just clear it. */
		mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
	}

	/* Re-enable interrupts and let the core continue. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
	mali_group_unlock(core->group);

#if MALI_TIMELINE_PROFILING_ENABLED
#if 0
	_mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
#endif
#endif
}
/* Create and register the (single) GP core object for the given HW resource.
 *
 * @param resource HW resource (base address, IRQ, description).
 * @param group    Group this GP core belongs to.
 * @return the new core on success, NULL on any failure.
 *
 * Asserts that no global GP core exists yet. On failure, resources are
 * released in reverse order of acquisition (timer -> IRQ -> hw_core ->
 * core allocation); the nested if/else structure implements that unwinding. */
struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
{
	struct mali_gp_core* core = NULL;

	MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
	MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));

	core = _mali_osk_malloc(sizeof(struct mali_gp_core));
	if (NULL != core)
	{
		core->group = group;
		core->running_job = NULL;
		/* No performance counters active until explicitly configured. */
		core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
		core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
		core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
		core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;

		if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE))
		{
			_mali_osk_errcode_t ret;

			/* Reset the core under the group lock before exposing it. */
			mali_group_lock(group);
			ret = mali_gp_reset(core);
			mali_group_unlock(group);

			if (_MALI_OSK_ERR_OK == ret)
			{
				/* Setup IRQ handlers (which will do IRQ probing if needed). */
				core->irq = _mali_osk_irq_init(resource->irq, mali_gp_upper_half, mali_gp_bottom_half, mali_gp_irq_probe_trigger, mali_gp_irq_probe_ack, core, "mali_gp_irq_handlers");
				if (NULL != core->irq)
				{
					/* Initialise the timeout timer. */
					core->timeout_timer = _mali_osk_timer_init();
					if(NULL != core->timeout_timer)
					{
						_mali_osk_timer_setcallback(core->timeout_timer, mali_gp_timeout, (void *)core);

						/* Success: publish as the global GP core. */
						MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
						mali_global_gp_core = core;
						return core;
					}
					else
					{
						MALI_PRINT_ERROR(("Failed to setup timeout timer for GP core %s\n", core->hw_core.description));
						_mali_osk_irq_term(core->irq);
					}
				}
				else
				{
					MALI_PRINT_ERROR(("Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
				}
			}
			mali_hw_core_delete(&core->hw_core);
		}
		_mali_osk_free(core);
	}
	else
	{
		MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
	}

	return NULL;
}
static void mali_gp_bottom_half(void *data) { struct mali_gp_core *core = (struct mali_gp_core *)data; u32 irq_readout; u32 irq_errors; #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0); #endif mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */ if ( MALI_FALSE == mali_group_power_is_on(core->group) ) { MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description)); mali_group_unlock(core->group); #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif return; } irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED; MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description)); if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) { u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS); if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) { mali_gp_post_process_job(core, MALI_FALSE); MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n")); mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */ #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif return; } } /* * Now lets look at the possible error cases (IRQ indicating error or timeout) * END_CMD_LST, HANG and PLBU_OOM 
interrupts are not considered error. */ irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM); if (0 != irq_errors) { mali_gp_post_process_job(core, MALI_FALSE); MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description)); mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */ #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif return; } else if (MALI_TRUE == core->core_timed_out) /* SW timeout */ { if (core->timeout_job_id == mali_gp_job_get_id(core->running_job)) { mali_gp_post_process_job(core, MALI_FALSE); MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job))); mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT); } core->core_timed_out = MALI_FALSE; #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif return; } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) { /* GP wants more memory in order to continue. * * This must be handled prior to HANG because this actually can * generate a HANG while waiting for more memory. * And it must be handled before the completion interrupts, * since the PLBU can run out of memory after VS is complete; * in which case the OOM must be handled before to complete the * PLBU work. 
*/ mali_gp_post_process_job(core, MALI_TRUE); MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n")); mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */ #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif return; } else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG) { /* we mask hang interrupts, so this should never happen... */ MALI_DEBUG_ASSERT( 0 ); } /* The only way to get here is if we only got one of two needed END_CMD_LST * interrupts. Disable the interrupt that has been received and continue to * run. */ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED & ((irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) ? ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST : ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST )); mali_group_unlock(core->group); #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_EVENT_CHANNEL_SOFTWARE|MALI_PROFILING_EVENT_REASON_START_STOP_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0); #endif }