mali_bool mali_pm_runtime_suspend(void)
{
	mali_bool ret;

	MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));

	mali_pm_exec_lock();

	/*
	 * Put SW state directly into "off" state, and do not bother to power
	 * down each power domain, because the entire GPU will be powered off
	 * when we return.
	 * For runtime PM suspend, in contrast to OS suspend, there is a race
	 * between this function and mali_pm_update_sync_internal(), which
	 * is fine...
	 */

	ret = mali_pm_common_suspend();
	if (MALI_TRUE == ret) {
		mali_pm_runtime_active = MALI_FALSE;
	} else {
		/*
		 * Process the "power up" instead,
		 * which could have been "lost"
		 */
		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();

	return ret;
}
void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
{
	u32 stat;

	MALI_DEBUG_ASSERT_POINTER(pmu);
	MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);

	mali_pm_exec_lock();

	/* Now simply power down the domains which are marked as powered up */
	stat = mali_hw_core_register_read(&pmu->hw_core,
					  PMU_REG_ADDR_MGMT_STATUS);
	mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);

	mali_pm_exec_unlock();
}
void mali_pm_update_sync(void)
{
	mali_pm_exec_lock();

	if (MALI_TRUE == mali_pm_runtime_active) {
		/*
		 * Only update if GPU is powered on.
		 * Deactivation of the last group will result in both a
		 * deferred runtime PM suspend operation and
		 * deferred execution of this function.
		 * mali_pm_runtime_active will be false if runtime PM
		 * executed first and thus the GPU is now fully powered off.
		 */
		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();
}
void mali_pm_runtime_resume(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	mali_pm_exec_lock();

	mali_pm_runtime_active = MALI_TRUE;

#if defined(DEBUG)
	++num_pm_runtime_resume;

	mali_pm_state_lock();

	/*
	 * Assert that things are as we left them in runtime_suspend(),
	 * except for pd_mask_wanted, which normally will be the reason we
	 * got here (job queued => domains wanted).
	 */
	MALI_DEBUG_ASSERT(0 == pd_mask_current);
	MALI_DEBUG_ASSERT(0 == pmu_mask_current);

	mali_pm_state_unlock();
#endif

	if (NULL != pmu) {
		mali_pmu_reset(pmu);
		pmu_mask_current = mali_pmu_get_mask(pmu);
		MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x\n",
				     pmu_mask_current));
	}

	/*
	 * Normally we are resumed because a job has just been queued, so
	 * pd_mask_wanted should be != 0.
	 * It is, however, possible for others to take a Mali runtime PM
	 * reference without having a job queued.
	 * We should always call mali_pm_update_sync_internal() regardless,
	 * because it will take care of any potential mismatch between
	 * pmu_mask_current and pd_mask_current.
	 */
	mali_pm_update_sync_internal();

	mali_pm_exec_unlock();
}
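/*
 * Illustrative sketch (assumption, not part of the original sources): the two
 * runtime PM entry points above are typically invoked from the Linux runtime
 * PM callbacks registered by the platform glue. The wrapper names below are
 * hypothetical; only mali_pm_runtime_suspend()/mali_pm_runtime_resume() come
 * from the code above. A MALI_FALSE return from mali_pm_runtime_suspend()
 * means a power-up request won the race, so the suspend should be refused.
 */
#include <linux/device.h>
#include <linux/errno.h>

static int mali_driver_runtime_suspend(struct device *dev)
{
	if (MALI_TRUE == mali_pm_runtime_suspend()) {
		return 0;
	}

	/* A "power up" arrived while suspending; tell the PM core to back off */
	return -EBUSY;
}

static int mali_driver_runtime_resume(struct device *dev)
{
	mali_pm_runtime_resume();

	return 0;
}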
void mali_pm_os_suspend(mali_bool os_suspend)
{
	mali_bool ret;

	MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));

	/* Suspend execution of all jobs, and go to inactive state */
	mali_executor_suspend();

	if (os_suspend) {
		mali_control_timer_suspend(MALI_TRUE);
	}

	mali_pm_exec_lock();

	ret = mali_pm_common_suspend();

	MALI_DEBUG_ASSERT(MALI_TRUE == ret);
	MALI_IGNORE(ret);

	mali_pm_exec_unlock();
}
void mali_pm_os_resume(void)
{
	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();

	MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));

	mali_pm_exec_lock();

#if defined(DEBUG)
	mali_pm_state_lock();

	/* Assert that things are as we left them in os_suspend(). */
	MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
	MALI_DEBUG_ASSERT(0 == pd_mask_current);
	MALI_DEBUG_ASSERT(0 == pmu_mask_current);

	MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());

	mali_pm_state_unlock();
#endif

	if (MALI_TRUE == mali_pm_runtime_active) {
		/* Runtime PM was active, so reset PMU */
		if (NULL != pmu) {
			mali_pmu_reset(pmu);
			pmu_mask_current = mali_pmu_get_mask(pmu);
			MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x\n",
					     pmu_mask_current));
		}

		mali_pm_update_sync_internal();
	}

	mali_pm_exec_unlock();

	/* Start executing jobs again */
	mali_executor_resume();
}
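/*
 * Illustrative sketch (assumption): system sleep wrappers and the dev_pm_ops
 * table that tie the OS and runtime PM paths together. The wrapper and ops
 * names are hypothetical; SET_SYSTEM_SLEEP_PM_OPS/SET_RUNTIME_PM_OPS and
 * struct dev_pm_ops come from <linux/pm.h>. Passing MALI_TRUE mirrors the
 * os_suspend parameter taken by mali_pm_os_suspend() above.
 */
#include <linux/pm.h>

static int mali_driver_suspend_scheduler(struct device *dev)
{
	mali_pm_os_suspend(MALI_TRUE);

	return 0;
}

static int mali_driver_resume_scheduler(struct device *dev)
{
	mali_pm_os_resume();

	return 0;
}

static const struct dev_pm_ops mali_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(mali_driver_runtime_suspend,
			   mali_driver_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mali_driver_suspend_scheduler,
				mali_driver_resume_scheduler)
};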