/* Point the MMU at the dedicated page-fault-flush page directory.
 * Called while servicing a page fault, so the stall request is allowed to
 * fail (the MMU may already be in page-fault mode); the address space is
 * switched regardless. Caller must hold the group lock. */
void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
{
	mali_bool stall_success;
	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_ASSERT_GROUP_LOCKED(mmu->group);

	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
	stall_success = mali_mmu_enable_stall(mmu);
	/* This function is expected to fail the stalling, since the MMU might be
	 * in PageFault mode when it is called */
	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
	/* Only undo the stall if we actually managed to stall */
	if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
}
mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu) { const int max_loop_count = 100; const int delay_in_usecs = 999; int i; u32 mmu_status; /* There are no group when it is called from mali_mmu_create */ if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group); mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) ) { MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enebled.\n")); return MALI_TRUE; } if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) { MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n")); return MALI_FALSE; } mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL); for (i = 0; i < max_loop_count; ++i) { mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) { break; } if ( 0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED ))) { break; } _mali_osk_time_ubusydelay(delay_in_usecs); } if (max_loop_count == i) { MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); return MALI_FALSE; } if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) { MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n")); return MALI_FALSE; } return MALI_TRUE; }
/* Drop one reference to the page directory currently active on the group.
 * The MMU deliberately keeps pointing at group->session even when the count
 * reaches zero (lazy deactivation); the empty page directory is only
 * installed later, e.g. by mali_group_remove_session_if_unused(). */
static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	MALI_DEBUG_ASSERT(0 < group->page_dir_ref_count);
	MALI_DEBUG_ASSERT(session == group->session);

	--group->page_dir_ref_count;

	/* Optimization: do NOT call mali_mmu_activate_empty_page_directory(group->mmu)
	 * here even when the reference count drops to zero. */

	MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
}
/* Point the MMU at the shared "empty" page directory so the group holds no
 * session mapping. Must only be called while the cores are idle, in which
 * case the stall request cannot legitimately fail. Caller holds group lock. */
void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
{
	mali_bool stall_success;

	MALI_DEBUG_ASSERT_POINTER(mmu);
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));

	stall_success = mali_mmu_enable_stall(mmu);
	/* This function can only be called when the core is idle, so it could not fail. */
	MALI_DEBUG_ASSERT( stall_success );
	mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
	mali_mmu_disable_stall(mmu);
}
mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir) { mali_bool stall_success; MALI_DEBUG_ASSERT_POINTER(mmu); MALI_ASSERT_GROUP_LOCKED(mmu->group); MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description)); stall_success = mali_mmu_enable_stall(mmu); if ( MALI_FALSE==stall_success ) return MALI_FALSE; mali_mmu_activate_address_space(mmu, pagedir->page_directory); mali_mmu_disable_stall(mmu); return MALI_TRUE; }
/* Used to check if scheduler for the other core type needs to be called on job completion. * * Used only for Mali-200, where job start may fail if the only MMU is busy * with another session's address space. */ static inline mali_bool mali_group_other_reschedule_needed(struct mali_group *group) { MALI_ASSERT_GROUP_LOCKED(group); #if defined(USING_MALI200) if (group->pagedir_activation_failed) { group->pagedir_activation_failed = MALI_FALSE; return MALI_TRUE; } else #endif { return MALI_FALSE; } }
/**
 * Checks whether the criteria are met for removing a physical core from the
 * virtual group:
 * - the virtual group is idle,
 * - there are currently no physical groups (neither idle nor working), and
 * - there is a physical job (without a barrier) waiting to be scheduled.
 */
MALI_STATIC_INLINE mali_bool mali_pp_scheduler_can_move_virtual_to_physical(void)
{
	MALI_ASSERT_PP_SCHEDULER_LOCKED();
	MALI_DEBUG_ASSERT(NULL != virtual_group);
	MALI_ASSERT_GROUP_LOCKED(virtual_group);

	if (virtual_group_working)
	{
		return MALI_FALSE;
	}
	if (!_mali_osk_list_empty(&group_list_idle))
	{
		return MALI_FALSE;
	}
	if (!_mali_osk_list_empty(&group_list_working))
	{
		return MALI_FALSE;
	}

	return (NULL != mali_pp_scheduler_get_physical_job()) ? MALI_TRUE : MALI_FALSE;
}
static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session) { enum mali_group_activate_pd_status retval; MALI_ASSERT_GROUP_LOCKED(group); MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group)); MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count); if (0 != group->page_dir_ref_count) { if (group->session != session) { MALI_DEBUG_PRINT(4, ("Mali group: Activating session FAILED: 0x%08x on group 0x%08X. Existing session: 0x%08x\n", session, group, group->session)); return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED; } else { MALI_DEBUG_PRINT(4, ("Mali group: Activating session already activated: 0x%08x on group 0x%08X. New Ref: %d\n", session, group, 1+group->page_dir_ref_count)); retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD; } } else { /* There might be another session here, but it is ok to overwrite it since group->page_dir_ref_count==0 */ if (group->session != session) { mali_bool activate_success; MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X. Ref: %d\n", session, group->session, group, 1+group->page_dir_ref_count)); activate_success = mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session)); MALI_DEBUG_ASSERT(activate_success); if ( MALI_FALSE== activate_success ) return MALI_FALSE; group->session = session; retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD; } else { MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X. Ref: %d\n", session->page_directory, group, 1+group->page_dir_ref_count)); retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD; } } group->page_dir_ref_count++; return retval; }
/* Invalidate (zap) the MMU TLB. The ZAP command is issued unconditionally;
 * the return value tells the caller whether the MMU could be stalled around
 * it. Returns MALI_FALSE when the MMU is in page-fault state (stalling not
 * possible, and disable_stall must not be called in that state). */
mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
{
	mali_bool stall_success;
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	stall_success = mali_mmu_enable_stall(mmu);

	/* NOTE: the zap is written even when the stall failed; only the
	 * disable_stall below is conditional. */
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);

	if (MALI_FALSE == stall_success)
	{
		/* False means that it is in Pagefault state. Not possible to disable_stall then */
		return MALI_FALSE;
	}

	mali_mmu_disable_stall(mmu);
	return MALI_TRUE;
}
void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session) { MALI_ASSERT_GROUP_LOCKED(group); if (0 == group->page_dir_ref_count) { MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state); MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state); if (group->session == session) { MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on); MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group)); mali_mmu_activate_empty_page_directory(group->mmu); group->session = NULL; } } }
/* group lock need to be taken before calling mali_group_bottom_half */ void mali_group_bottom_half(struct mali_group *group, enum mali_group_event_t event) { MALI_ASSERT_GROUP_LOCKED(group); switch (event) { case GROUP_EVENT_PP_JOB_COMPLETED: mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_TRUE); /* PP job SUCCESS */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_PP_JOB_FAILED: mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job FAIL */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_PP_JOB_TIMED_OUT: mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job TIMEOUT */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_GP_JOB_COMPLETED: mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_TRUE); /* GP job SUCCESS */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_GP_JOB_FAILED: mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job FAIL */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_GP_JOB_TIMED_OUT: mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job TIMEOUT */ /* group lock is released by mali_group_complete_jobs() call above */ break; case GROUP_EVENT_GP_OOM: group->gp_state = MALI_GROUP_CORE_STATE_OOM; mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */ mali_gp_scheduler_oom(group, group->gp_running_job); break; case GROUP_EVENT_MMU_PAGE_FAULT: mali_group_complete_jobs(group, MALI_TRUE, MALI_TRUE, MALI_FALSE); /* GP and PP job FAIL */ /* group lock is released by mali_group_complete_jobs() call above */ break; default: break; } }
/* Hard reset of the PP core: force a reset, then poll the WRITE_BOUNDARY_LOW
 * scratch register until a written check value reads back, proving the core
 * has come out of reset. NOTE(review): presumably the bus must be stopped by
 * the caller before this is invoked (see mali_pp_reset) — confirm. */
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
{
	const int reset_finished_loop_count = 15;
	const u32 reset_invalid_value = 0xC0FFE000;   /* bogus marker written before reset */
	const u32 reset_check_value = 0xC01A0000;     /* value that must read back after reset */
	int i;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	/* Harvest perf counters / detach the running job before core state is lost */
	mali_pp_post_process_job(core);

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);

	/* Wait for reset completion: once the register file accepts writes again,
	 * the check value sticks */
	for (i = 0; i < reset_finished_loop_count; i++)
	{
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (i == reset_finished_loop_count)
	{
		MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
	}

	/* Restore the scratch register and re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
void mali_mmu_disable_stall(struct mali_mmu_core *mmu) { const int max_loop_count = 100; const int delay_in_usecs = 1; int i; u32 mmu_status; /* There are no group when it is called from mali_mmu_create */ if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group); mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n")); return; } if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) { MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n")); return; } mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL); for (i = 0; i < max_loop_count; ++i) { u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) ) { break; } if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) { break; } if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED )) { break; } _mali_osk_time_ubusydelay(delay_in_usecs); } if (max_loop_count == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); }
/* Hard reset of the PP core. Bus must be stopped before calling this
 * function. Forces a reset and then polls the WRITE_BOUNDARY_LOW scratch
 * register until a written check value reads back, proving reset completed. */
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
{
	const int reset_finished_loop_count = 15;
	const u32 reset_invalid_value = 0xC0FFE000;   /* bogus marker written before reset */
	const u32 reset_check_value = 0xC01A0000;     /* value that must read back after reset */
	int i;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	mali_pp_post_process_job(core); /* @@@@?is there some cases where it is unsafe to post process the job here? */

	/* Set register to a bogus value. The register will be used to detect when reset is complete */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);

	/* Force core to reset */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);

	/* Wait for reset to be complete */
	for (i = 0; i < reset_finished_loop_count; i++)
	{
		mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (i == reset_finished_loop_count)
	{
		MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
	}

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */

	/* Re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
/* Full recovery reset of a group after an unrecoverable error (e.g. a page
 * fault with failed core reset). The ordering is significant: stop the core
 * buses, flush the MMU out of page-fault mode, wait for the buses to stop,
 * hard-reset the cores, then reset the MMU. Detaches the session last. */
static void mali_group_recovery_reset(struct mali_group *group)
{
	MALI_ASSERT_GROUP_LOCKED(group);

	/* Stop cores, bus stop */
	if (NULL != group->pp_core)
	{
		mali_pp_stop_bus(group->pp_core);
	}
	if (NULL != group->gp_core)
	{
		mali_gp_stop_bus(group->gp_core);
	}

	/* Flush MMU */
	mali_mmu_activate_fault_flush_page_directory(group->mmu);
	mali_mmu_page_fault_done(group->mmu);

	/* Wait for cores to stop bus */
	if (NULL != group->pp_core)
	{
		mali_pp_stop_bus_wait(group->pp_core);
	}
	if (NULL != group->gp_core)
	{
		mali_gp_stop_bus_wait(group->gp_core);
	}

	/* Reset cores */
	if (NULL != group->pp_core)
	{
		mali_pp_hard_reset(group->pp_core);
	}
	if (NULL != group->gp_core)
	{
		mali_gp_hard_reset(group->gp_core);
	}

	/* Reset MMU */
	mali_mmu_reset(group->mmu);
	group->session = NULL;
}
/* Hard reset of the GP core: force a reset, then poll the WRITE_BOUND_LOW
 * scratch register until a written check value reads back, proving the core
 * has come out of reset.
 * NOTE(review): unlike the PP hard reset, this poll loop has no busy-delay
 * between iterations — confirm whether that is intentional. */
void mali_gp_hard_reset(struct mali_gp_core *core)
{
	const int reset_finished_loop_count = 15;
	const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
	const u32 reset_invalid_value = 0xC0FFE000;   /* bogus marker written before reset */
	const u32 reset_check_value = 0xC01A0000;     /* value that must read back after reset */
	const u32 reset_default_value = 0;
	int i;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	/* Harvest perf counters / detach the running job before core state is lost */
	mali_gp_post_process_job(core, MALI_FALSE);

	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);

	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);

	/* Wait for reset completion: the check value sticks once the register
	 * file accepts writes again */
	for (i = 0; i < reset_finished_loop_count; i++)
	{
		mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
		if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register))
		{
			break;
		}
	}

	if (i == reset_finished_loop_count)
	{
		MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
	}

	/* Restore the scratch register and re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value);

	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
}
/* Invalidate the single TLB line covering @mali_address (ZAP_ONE_LINE takes
 * the page-directory-entry index for the address). Caller holds group lock. */
void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
}
/* Issue a TLB zap without stalling the MMU first. Used where the caller
 * knows a stall is unnecessary or impossible. Caller holds the group lock. */
void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
}
/* Tell the MMU that page-fault handling is complete so it leaves page-fault
 * mode and resumes normal operation. Caller holds the group lock. */
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
{
	MALI_ASSERT_GROUP_LOCKED(mmu->group);
	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
}
/* ------ local helper functions below --------- */ static void mali_pp_post_process_job(struct mali_pp_core *core) { MALI_ASSERT_GROUP_LOCKED(core->group); if (NULL != core->running_job) { u32 val0 = 0; u32 val1 = 0; #if MALI_TIMELINE_PROFILING_ENABLED int counter_index = COUNTER_FP0_C0 + (2 * core->core_id); #endif if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { val0 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE); if (mali_pp_job_get_perf_counter_flag(core->running_job) && _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE && mali_pp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used) { /* We retrieved the counter that user space asked for, so return the value through the job object */ mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, val0); } else { /* User space asked for a counter, but this is not what we retrived (overridden by counter src set on core) */ mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE); } #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_report_hw_counter(counter_index, val0); #endif } if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used) { val1 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE); if (mali_pp_job_get_perf_counter_flag(core->running_job) && _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE && mali_pp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used) { /* We retrieved the counter that user space asked for, so return the value through the job object */ mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, val1); } else { /* User space asked for a counter, but this is not what we retrived (overridden by counter src set on core) */ mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE); } #if MALI_TIMELINE_PROFILING_ENABLED 
_mali_osk_profiling_report_hw_counter(counter_index + 1, val1); #endif } #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0); #endif /* We are no longer running a job... */ core->running_job = NULL; _mali_osk_timer_del(core->timeout_timer); } }
/* Soft reset of the PP core. On Mali-200 the bus is stopped and a hard reset
 * is performed; Mali-300/400 use the dedicated SOFT_RESET command. Returns
 * _MALI_OSK_ERR_FAULT if the bus stop / reset does not complete in time. */
_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
{
	int i;
	const int request_loop_count = 20;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	mali_pp_post_process_job(core); /* @@@@?is there some cases where it is unsafe to post process the job here? */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */

#if defined(USING_MALI200)
	/* On Mali-200, stop the bus, then do a hard reset of the core */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);

	/* Poll until the bus reports stopped */
	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_PRINT_ERROR(("Mali PP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
		return _MALI_OSK_ERR_FAULT ;
	}

	/* the bus was stopped OK, do the hard reset */
	mali_pp_hard_reset(core);
#elif defined(USING_MALI400)
	/* Mali-300 and Mali-400 have a safe reset command which we use */

	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);

	/* Poll until the reset-completed IRQ bit is raised */
	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_DEBUG_PRINT(2, ("Mali PP: Failed to reset core %s, Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
		return _MALI_OSK_ERR_FAULT;
	}
#else
#error "no supported mali core defined"
#endif

	/* Re-enable interrupts */
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
/* Read back the HW performance counters for the running GP job (if any),
 * report them to the profiling layer, record the current PLBU heap address,
 * and — unless this is a suspend — detach the job from the core.
 * Fix: the perf-counter-flag tests used logical '&&' against the nonzero
 * _MALI_PERFORMANCE_COUNTER_FLAG_SRC*_ENABLE bit masks, which made the flag
 * test a no-op; they now use bitwise '&' so the actual enable bit is checked. */
static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend)
{
	MALI_ASSERT_GROUP_LOCKED(core->group);

	if (NULL != core->running_job)
	{
		u32 val0 = 0;
		u32 val1 = 0;
#if MALI_TIMELINE_PROFILING_ENABLED
		u32 event_id;
#endif

		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) && mali_gp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
			{
				/* We retrieved the counter that user space asked for, so return the value through the job object */
				mali_gp_job_set_perf_counter_value0(core->running_job, val0);
			}
			else
			{
				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
				mali_gp_job_set_perf_counter_value0(core->running_job, MALI_HW_CORE_INVALID_VALUE);
			}

#if MALI_TIMELINE_PROFILING_ENABLED
			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C0, val0);
#endif
		}

		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) && mali_gp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
			{
				/* We retrieved the counter that user space asked for, so return the value through the job object */
				mali_gp_job_set_perf_counter_value1(core->running_job, val1);
			}
			else
			{
				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
				mali_gp_job_set_perf_counter_value1(core->running_job, MALI_HW_CORE_INVALID_VALUE);
			}

#if MALI_TIMELINE_PROFILING_ENABLED
			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C1, val1);
#endif
		}

#if MALI_TIMELINE_PROFILING_ENABLED
		if (MALI_TRUE == suspend)
		{
			event_id = MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
		}
		else
		{
			event_id = MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
		}
		_mali_osk_profiling_add_event(event_id, val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
#endif

		mali_gp_job_set_current_heap_addr(core->running_job, mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR));

		if (MALI_TRUE != suspend)
		{
			/* We are no longer running a job... */
			core->running_job = NULL;
			_mali_osk_timer_del(core->timeout_timer);
		}
	}
}
/* Group lock need to be taken before calling mali_group_complete_jobs. Will release the lock here.
 *
 * Completes the GP job, the PP job, or both (the both-case, and any failed
 * single-core reset, escalates to a full group recovery reset). The group
 * lock is always released on every path before the scheduler job_done
 * callbacks are invoked. */
static void mali_group_complete_jobs(struct mali_group *group, mali_bool complete_gp, mali_bool complete_pp, bool success)
{
	mali_bool need_group_reset = MALI_FALSE;
	mali_bool gp_success = success;
	mali_bool pp_success = success;

	MALI_ASSERT_GROUP_LOCKED(group);

	if (complete_gp && !complete_pp)
	{
		MALI_DEBUG_ASSERT_POINTER(group->gp_core);
		if (_MALI_OSK_ERR_OK == mali_gp_reset(group->gp_core))
		{
			struct mali_gp_job *gp_job_to_return = group->gp_running_job;
			group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
			group->gp_running_job = NULL;

			MALI_DEBUG_ASSERT_POINTER(gp_job_to_return);

			mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));

			/* Mali-200 only: a PP job may have been blocked by our address space */
			if(mali_group_other_reschedule_needed(group))
			{
				mali_group_unlock(group);
				mali_pp_scheduler_do_schedule();
			}
			else
			{
				mali_group_unlock(group);
			}

			mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
			mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */

			return;
		}
		else
		{
			need_group_reset = MALI_TRUE;
			MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset GP, need to reset entire group\n"));
			pp_success = MALI_FALSE; /* This might kill PP as well, so this should fail */
		}
	}
	if (complete_pp && !complete_gp)
	{
		MALI_DEBUG_ASSERT_POINTER(group->pp_core);
		if (_MALI_OSK_ERR_OK == mali_pp_reset(group->pp_core))
		{
			struct mali_pp_job *pp_job_to_return = group->pp_running_job;
			u32 pp_sub_job_to_return = group->pp_running_sub_job;
			group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
			group->pp_running_job = NULL;

			MALI_DEBUG_ASSERT_POINTER(pp_job_to_return);

			mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));

			/* Mali-200 only: a GP job may have been blocked by our address space */
			if(mali_group_other_reschedule_needed(group))
			{
				mali_group_unlock(group);
				mali_gp_scheduler_do_schedule();
			}
			else
			{
				mali_group_unlock(group);
			}

			mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
			mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */

			return;
		}
		else
		{
			need_group_reset = MALI_TRUE;
			MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset PP, need to reset entire group\n"));
			gp_success = MALI_FALSE; /* This might kill GP as well, so this should fail */
		}
	}
	else if (complete_gp && complete_pp)
	{
		need_group_reset = MALI_TRUE;
	}

	if (MALI_TRUE == need_group_reset)
	{
		struct mali_gp_job *gp_job_to_return = group->gp_running_job;
		struct mali_pp_job *pp_job_to_return = group->pp_running_job;
		u32 pp_sub_job_to_return = group->pp_running_sub_job;
		mali_bool schedule_other = MALI_FALSE;

		MALI_DEBUG_PRINT(3, ("Mali group: Resetting entire group\n"));

		group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
		group->gp_running_job = NULL;
		if (NULL != gp_job_to_return)
		{
			mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));
		}

		group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
		group->pp_running_job = NULL;
		if (NULL != pp_job_to_return)
		{
			mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));
		}

		/* The reset has to be done after mali_group_deactivate_page_directory() */
		mali_group_recovery_reset(group);

		if (mali_group_other_reschedule_needed(group) && (NULL == gp_job_to_return || NULL == pp_job_to_return))
		{
			schedule_other = MALI_TRUE;
		}

		mali_group_unlock(group);

		if (NULL != gp_job_to_return)
		{
			mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
			mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
		}
		else if (schedule_other)
		{
			mali_pp_scheduler_do_schedule();
		}

		if (NULL != pp_job_to_return)
		{
			mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
			mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
		}
		else if (schedule_other)
		{
			mali_gp_scheduler_do_schedule();
		}

		return;
	}

	mali_group_unlock(group);
}
/* Read back the HW performance counters for the running GP job (if any),
 * optionally print L2 hit-rate statistics, record the current PLBU heap
 * address, and — unless this is a suspend — detach the job from the core.
 * Fixes: the perf-counter-flag tests used logical '&&' against the nonzero
 * _MALI_PERFORMANCE_COUNTER_FLAG_SRC*_ENABLE bit masks (made the flag test a
 * no-op; now bitwise '&'), and the L2 hit-rate debug block could divide by
 * zero when both counters were zero (now guarded). */
static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend)
{
	MALI_ASSERT_GROUP_LOCKED(core->group);

	if (NULL != core->running_job)
	{
		u32 val0 = 0;
		u32 val1 = 0;
#if MALI_TIMELINE_PROFILING_ENABLED
		u32 event_id;
#endif

#if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH
		{
			u32 src0, value0, src1, value1, sum, per_thousand, per_thousand_now, diff0, diff1;
			static u32 print_nr=0;
			static u32 prev0=0;
			static u32 prev1=0;
			/* Only print every 512th completion to limit log volume */
			if ( !(++print_nr&511) )
			{
				mali_l2_cache_core_get_counter_values(mali_l2_cache_core_get_glob_l2_core(0), &src0, &value0, &src1, &value1);
				MALI_DEBUG_ASSERT( src0==20 );
				MALI_DEBUG_ASSERT( src1==21 );
				sum = value0+value1;
				if ( 0 == sum ) /* BUG FIX: guard division by zero on idle counters */
				{
					per_thousand = 0;
				}
				else if ( sum > 1000000 )
				{
					per_thousand = value0 / (sum/1000);
				}
				else
				{
					per_thousand = (value0*1000) / (sum);
				}
				diff0 = value0-prev0;
				diff1 = value1-prev1;

				sum = diff0 + diff1 ;
				if ( 0 == sum ) /* BUG FIX: guard division by zero */
				{
					per_thousand_now = 0;
				}
				else if ( sum > 1000000 )
				{
					per_thousand_now = diff0 / (sum/1000);
				}
				else
				{
					per_thousand_now = (diff0*1000) / (sum);
				}
				prev0=value0;
				prev1=value1;
				if (per_thousand_now<=1000)
				{
					MALI_DEBUG_PRINT(2, ("Mali L2: Read hits/misses: %d/%d = %d thousand_parts total, since previous: %d\n", value0, value1, per_thousand, per_thousand_now));
				}
			}
		}
#endif

		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) && mali_gp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
			{
				/* We retrieved the counter that user space asked for, so return the value through the job object */
				mali_gp_job_set_perf_counter_value0(core->running_job, val0);
			}
			else
			{
				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
				mali_gp_job_set_perf_counter_value0(core->running_job, MALI_HW_CORE_INVALID_VALUE);
			}

#if MALI_TIMELINE_PROFILING_ENABLED
			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C0, val0);
#endif
		}

		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
			if ((mali_gp_job_get_perf_counter_flag(core->running_job) & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) && mali_gp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
			{
				/* We retrieved the counter that user space asked for, so return the value through the job object */
				mali_gp_job_set_perf_counter_value1(core->running_job, val1);
			}
			else
			{
				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
				mali_gp_job_set_perf_counter_value1(core->running_job, MALI_HW_CORE_INVALID_VALUE);
			}

#if MALI_TIMELINE_PROFILING_ENABLED
			_mali_osk_profiling_report_hw_counter(COUNTER_VP_C1, val1);
#endif
		}

#if MALI_TIMELINE_PROFILING_ENABLED
		if (MALI_TRUE == suspend)
		{
			event_id = MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
		}
		else
		{
			event_id = MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
		}
		_mali_osk_profiling_add_event(event_id, val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
#endif

		mali_gp_job_set_current_heap_addr(core->running_job, mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR));

		if (MALI_TRUE != suspend)
		{
			/* We are no longer running a job... */
			core->running_job = NULL;
			_mali_osk_timer_del(core->timeout_timer);
		}
	}
}
static void mali_pp_post_process_job(struct mali_pp_core *core) { MALI_ASSERT_GROUP_LOCKED(core->group); if (NULL != core->running_job) { u32 val0 = 0; u32 val1 = 0; #if MALI_TIMELINE_PROFILING_ENABLED int counter_index = COUNTER_FP0_C0 + (2 * core->core_id); #endif if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { val0 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE); if (mali_pp_job_get_perf_counter_flag(core->running_job) && _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE && mali_pp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used) { mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, val0); } else { mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE); } #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_report_hw_counter(counter_index, val0); #endif } if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used) { val1 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE); if (mali_pp_job_get_perf_counter_flag(core->running_job) && _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE && mali_pp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used) { mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, val1); } else { mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE); } #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_report_hw_counter(counter_index + 1, val1); #endif } #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0); #endif core->running_job = NULL; _mali_osk_timer_del(core->timeout_timer); } }
/*
 * Reset a GP core so it is ready to accept a new job.
 *
 * First salvages counter/heap state from any job still attached to the core
 * (non-suspend post-processing), then performs the hardware reset appropriate
 * for the core revision, and finally re-arms the interrupt mask.
 *
 * Caller must hold the group lock.
 *
 * @param core GP core to reset
 * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if the core does
 *         not acknowledge the bus stop / reset within the polling budget
 *         (20 iterations x 10 us busy-wait)
 */
_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
{
	int i;
	const int request_loop_count = 20;

	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
	MALI_ASSERT_GROUP_LOCKED(core->group);

	/* Read back counters/heap pointer from a still-attached job before the
	 * reset wipes the registers; also detaches the job and stops its timer. */
	mali_gp_post_process_job(core, MALI_FALSE);

	/* Mask all interrupts while the core is being reset. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0);

#if defined(USING_MALI200)
	/* Mali-200 path: stop the bus first, then hard-reset the core. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);

	/* Poll for the bus-stopped acknowledgement. */
	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_PRINT_ERROR(("Mali GP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
		return _MALI_OSK_ERR_FAULT;
	}

	mali_gp_hard_reset(core);
#elif defined(USING_MALI400)
	/* Mali-400 path: issue a soft reset and poll the raw interrupt status
	 * for the reset-completed bit (cleared first so we see a fresh edge). */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);

	for (i = 0; i < request_loop_count; i++)
	{
		if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED)
		{
			break;
		}
		_mali_osk_time_ubusydelay(10);
	}

	if (request_loop_count == i)
	{
		MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, unable to recover\n", core->hw_core.description));
		return _MALI_OSK_ERR_FAULT;
	}
#else
#error "no supported mali core defined"
#endif

	/* Re-arm interrupts: clear anything pending, then unmask the used set. */
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
	mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);

	return _MALI_OSK_ERR_OK;
}
/*
 * Query whether the group's power domain is currently on.
 * Caller must hold the group lock.
 *
 * @param group group to query
 * @return MALI_TRUE if powered on, MALI_FALSE otherwise
 */
mali_bool mali_group_power_is_on(struct mali_group *group)
{
	mali_bool is_on;

	MALI_ASSERT_GROUP_LOCKED(group);
	is_on = group->power_is_on;

	return is_on;
}
/*
 * Program a PP core with one sub job of a PP job and start rendering.
 *
 * Writes frame registers (patching the per-sub-job frame/stack addresses for
 * sub jobs > 0), the three write-back unit register sets, and the performance
 * counter configuration (global config overrides per-job counters), then
 * issues the start command and arms the job watchdog timer.
 *
 * Fix: core was dereferenced (core->counter_src0) BEFORE
 * MALI_DEBUG_ASSERT_POINTER(core), which made the pointer assert useless;
 * the assertions now run first. Also fixed "rester" comment typos.
 *
 * Caller must hold the group lock.
 *
 * @param core    PP core to start the job on
 * @param job     job to run
 * @param sub_job index of the sub job to run on this core
 */
void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
{
	u32 *frame_registers;
	u32 *wb0_registers;
	u32 *wb1_registers;
	u32 *wb2_registers;

	/* Validate inputs before the first dereference of core. */
	MALI_DEBUG_ASSERT_POINTER(core);
	MALI_ASSERT_GROUP_LOCKED(core->group);

	frame_registers = mali_pp_job_get_frame_registers(job);
	wb0_registers = mali_pp_job_get_wb0_registers(job);
	wb1_registers = mali_pp_job_get_wb1_registers(job);
	wb2_registers = mali_pp_job_get_wb2_registers(job);

	core->counter_src0_used = core->counter_src0;
	core->counter_src1_used = core->counter_src1;

	mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, frame_registers, MALI200_NUM_REGS_FRAME, mali_frame_registers_reset_values);
	_mali_osk_mem_barrier();

	if (0 != sub_job)
	{
		/*
		 * There are two frame registers which are different for each sub job.
		 * For the first sub job, these are correctly represented in the frame register array,
		 * but we need to patch these for all other sub jobs
		 */
		mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job));
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK/4]);
	}

	if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, MALI200_NUM_REGS_WBx, mali_wb_registers_reset_values);
	}
	else
	{
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, 0, mali_wb_registers_reset_values[0]);
	}

	if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, MALI200_NUM_REGS_WBx, mali_wb_registers_reset_values);
	}
	else
	{
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, 0, mali_wb_registers_reset_values[0]);
	}

	if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
	{
		mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, MALI200_NUM_REGS_WBx, mali_wb_registers_reset_values);
	}
	else
	{
		mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, 0, mali_wb_registers_reset_values[0]);
	}

	/* This selects which performance counters we are reading */
	if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
	{
		/* global_config has enabled HW counters, this will override anything specified by user space */
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
		{
			mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
			mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
		}
		if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
		{
			mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
			mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
		}
	}
	else
	{
		/* Use HW counters from job object, if any */
		u32 perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
		if (0 != perf_counter_flag)
		{
			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
			{
				core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
				mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
				mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
			}

			if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
			{
				core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
				mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
				mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
			}
		}
	}

	MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));

	/* Adding barrier to make sure all register writes are finished */
	_mali_osk_write_mem_barrier();

	/* This is the command that starts the core. */
	mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);

	/* Adding barrier to make sure previous register write is finished */
	_mali_osk_write_mem_barrier();

	/* Setup the timeout timer value and save the job id for the job running on the pp core */
	_mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
	core->timeout_job_id = mali_pp_job_get_id(job);

#if MALI_TIMELINE_PROFILING_ENABLED
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
	                              mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id),
	                              mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
#endif

	core->running_job = job;
	core->running_sub_job = sub_job;
}
void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job) { u32 startcmd = 0; u32 *frame_registers = mali_gp_job_get_frame_registers(job); core->counter_src0_used = core->counter_src0; core->counter_src1_used = core->counter_src1; MALI_DEBUG_ASSERT_POINTER(core); MALI_ASSERT_GROUP_LOCKED(core->group); if (mali_gp_job_has_vs_job(job)) { startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS; } if (mali_gp_job_has_plbu_job(job)) { startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU; } MALI_DEBUG_ASSERT(0 != startcmd); mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME); #if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH { mali_l2_cache_core_set_counter_src0(mali_l2_cache_core_get_glob_l2_core(0), 20); mali_l2_cache_core_set_counter_src1(mali_l2_cache_core_get_glob_l2_core(0), 21); } #endif if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used) { mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } } else { u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job); if (0 != perf_counter_flag) { if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) { core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used); mali_hw_core_register_write(&core->hw_core, 
MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) { core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } } } MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd)); _mali_osk_write_mem_barrier(); mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd); _mali_osk_write_mem_barrier(); _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); core->timeout_job_id = mali_gp_job_get_id(job); #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job->frame_builder_id, job->flush_id, 0, 0, 0); _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), job->pid, job->tid, 0, 0, 0); #endif core->running_job = job; }
void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job) { u32 startcmd = 0; u32 *frame_registers = mali_gp_job_get_frame_registers(job); core->counter_src0_used = core->counter_src0; core->counter_src1_used = core->counter_src1; MALI_DEBUG_ASSERT_POINTER(core); MALI_ASSERT_GROUP_LOCKED(core->group); if (mali_gp_job_has_vs_job(job)) { startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS; } if (mali_gp_job_has_plbu_job(job)) { startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU; } MALI_DEBUG_ASSERT(0 != startcmd); mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME); #if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH { /* Read hits and Read misses*/ mali_l2_cache_core_set_counter_src0(mali_l2_cache_core_get_glob_l2_core(0), 20); mali_l2_cache_core_set_counter_src1(mali_l2_cache_core_get_glob_l2_core(0), 21); } #endif /* This selects which performance counters we are reading */ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { /* global_config has enabled HW counters, this will override anything specified by user space */ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used) { mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used) { mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } } else { /* Use HW counters from job object, if any */ u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job); if (0 != perf_counter_flag) { if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) { 
core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) { core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used); mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE); } } } MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd)); /* Barrier to make sure the previous register write is finished */ _mali_osk_write_mem_barrier(); /* This is the command that starts the core. */ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd); /* Barrier to make sure the previous register write is finished */ _mali_osk_write_mem_barrier(); /* Setup the timeout timer value and save the job id for the job running on the gp core */ _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); core->timeout_job_id = mali_gp_job_get_id(job); #if MALI_TIMELINE_PROFILING_ENABLED _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0); _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0); #endif core->running_job = job; }