/**
 * kbase_pm_do_poweron - Turn the GPU clocks/interrupts on and start powering
 * up the cores required by the current policy.
 * @kbdev:     The kbase device structure for the device
 * @is_resume: MALI_TRUE if this is being called as part of a system resume
 *
 * The caller must hold kbdev->pm.lock. After requesting the core state from
 * the policy, this busy-waits (bounded) for the shader cores to report ready,
 * logging a vendor trace event if the wait expires, then invokes the
 * post-power-on callback if one is registered.
 */
void kbase_pm_do_poweron(kbase_device *kbdev, mali_bool is_resume)
{
	/* Named constants for the previously magic numbers.
	 * NOTE(review): 0x3F assumes a 6-shader-core configuration - confirm
	 * against the target SoC's core layout. */
	enum {
		POWERON_SHADER_MASK = 0x3F,	/* all shader cores ready */
		POWERON_POLL_LIMIT = 0xFFFF	/* max poll iterations */
	};
	/* Plain local: re-read each iteration from the function call, so
	 * volatile (present in the original) adds nothing. */
	u64 cores_powered;
	int cnt = POWERON_POLL_LIMIT;

	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway; we only busy-wait a
	 * bounded number of iterations for the shader cores to report ready. */
	do {
		cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
		cnt--;
	} while (((cores_powered & POWERON_SHADER_MASK) != POWERON_SHADER_MASK) && cnt > 0);

	/* Log expiry only if the cores are genuinely still not ready. The
	 * original test (cnt == 0) could fire spuriously when the cores became
	 * ready on exactly the final iteration. */
	if ((cores_powered & POWERON_SHADER_MASK) != POWERON_SHADER_MASK)
		KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_POWER_ON_COUNT_EXPIRED, NULL, NULL, 0u, 0u);

	if (kbdev->pm.callback_power_on_post)
		kbdev->pm.callback_power_on_post(kbdev);
}
/**
 * kbase_pm_do_poweron - Power the GPU on and request cores per the policy.
 * @kbdev: The kbase device structure for the device
 *
 * The caller must hold kbdev->pm.lock.
 */
void kbase_pm_do_poweron(kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on. This is a no-op if a previous
	 * kbase_pm_clock_off() has not been done. */
	kbase_pm_clock_on(kbdev);

	/* Ask the policy which cores should now be powered, bracketed by
	 * timeline trace points. */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* We deliberately do not wait for the desired state to be reached:
	 * running atoms will wait for that state themselves. */
}
/**
 * kbase_pm_set_policy - Atomically swap the active power-management policy.
 * @kbdev:      The kbase device structure for the device (must not be NULL)
 * @new_policy: The policy to install (must not be NULL)
 *
 * Terminates the old policy, initializes the new one, and re-runs any core
 * state changes that were deferred while pm_current_policy was NULL. The GPU
 * is held active for the duration so the transition cannot race a suspend.
 */
void kbase_pm_set_policy(kbase_device *kbdev, const kbase_pm_policy *new_policy)
{
	const kbase_pm_policy *prev_policy;
	unsigned long irq_flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* Pretend the GPU is active for the whole transition. A suspend
	 * cannot occur here because we are in a syscall from a userspace
	 * thread. */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Detach the current policy under the IRQ spinlock so interrupt
	 * handlers stop consulting it. */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
	prev_policy = kbdev->pm.pm_current_policy;
	kbdev->pm.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, prev_policy->id);
	if (prev_policy->term)
		prev_policy->term(kbdev);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	/* Publish the new policy, again under the IRQ spinlock. */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
	kbdev->pm.pm_current_policy = new_policy;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);

	/* Any core power state changes attempted while pm_current_policy was
	 * NULL were dropped; retry them now that a policy is installed. */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	mutex_unlock(&kbdev->pm.lock);

	/* Transition complete: drop our fake context-active reference. */
	kbase_pm_context_idle(kbdev);
}