int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
{
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerup. Sometimes the event might be missed due to reading the count
	 * outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			mutex_unlock(&kbdev->pm.lock);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1) {
		/* First context active: Power on the GPU and any cores requested by
		 * the policy */
		kbase_pm_update_active(kbdev);
#ifndef MALI_SEC_SEPERATED_UTILIZATION
		kbasep_pm_record_gpu_active(kbdev);
#endif
	}

	mutex_unlock(&kbdev->pm.lock);

	return 0;
}
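
/*
 * Usage sketch (illustrative, not part of the driver): a caller that wants
 * the GPU active but can tolerate a system suspend being in progress can
 * pass KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE and back off on a non-zero
 * return. example_try_activate() and the -EAGAIN convention are hypothetical;
 * only the kbase_pm_* calls come from this file. Assumes <linux/errno.h>
 * for the error code.
 */
static int example_try_activate(struct kbase_device *kbdev)
{
	/* Returns 1, without taking an active reference, when a suspend is
	 * in progress and the handler permits refusal */
	if (kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
		return -EAGAIN;	/* retry after resume */

	/* ... submit GPU work here while holding one active reference ... */

	kbase_pm_context_idle(kbdev);	/* drop the reference */
	return 0;
}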
void kbase_pm_set_policy(struct kbase_device *kbdev, const struct kbase_pm_policy *new_policy)
{
	const struct kbase_pm_policy *old_policy;
	unsigned long flags;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_TRACE_ADD(kbdev, PM_SET_POLICY, NULL, NULL, 0u, new_policy->id);

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread */
	kbase_pm_context_active(kbdev);

	mutex_lock(&kbdev->pm.lock);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	old_policy = kbdev->pm.pm_current_policy;
	kbdev->pm.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u, old_policy->id);
	if (old_policy->term)
		old_policy->term(kbdev);

	KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, NULL, 0u, new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
	kbdev->pm.pm_current_policy = new_policy;
	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* If any core power state changes were previously attempted, but couldn't
	 * be made because the policy was changing (current_policy was NULL), then
	 * re-try them here. */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	mutex_unlock(&kbdev->pm.lock);

	/* Now the policy change is finished, we release our fake context active
	 * reference */
	kbase_pm_context_idle(kbdev);
}
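
/*
 * Illustrative helper (hypothetical, not in the driver): installing a policy
 * by name, e.g. from a sysfs store handler. The policies[] table and
 * example_switch_policy() are assumptions for this sketch; kbase_pm_set_policy()
 * above does the real work, and is safe to call with the GPU powered off
 * because it takes its own fake active reference. Assumes struct
 * kbase_pm_policy exposes a printable name field and that <linux/string.h>
 * and <linux/errno.h> are available.
 */
static int example_switch_policy(struct kbase_device *kbdev, const char *name,
		const struct kbase_pm_policy *const *policies, int num_policies)
{
	int i;

	for (i = 0; i < num_policies; i++) {
		if (strcmp(policies[i]->name, name) == 0) {
			kbase_pm_set_policy(kbdev, policies[i]);
			return 0;
		}
	}

	return -EINVAL;	/* no policy with that name */
}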
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerdown. Sometimes the event might be missed due to reading the
	 * count outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	mutex_lock(&kbdev->pm.lock);

	c = --kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	KBASE_DEBUG_ASSERT(c >= 0);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_pm_update_active(kbdev);
#ifndef MALI_SEC_SEPERATED_UTILIZATION
		kbasep_pm_record_gpu_idle(kbdev);
#endif

		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
		 * waiters must synchronize with us by locking the pm.lock after
		 * waiting */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	mutex_unlock(&kbdev->pm.lock);
}
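
/*
 * Suspend-side sketch (illustrative): the wake_up() above pairs with a wait
 * on pm.zero_active_count_wait. Per the comment in kbase_pm_context_idle(),
 * a waiter must take pm.lock after waking so it cannot race with a new
 * kbase_pm_context_active() reference appearing. example_wait_for_idle() is
 * hypothetical; the field names follow the code above. Assumes <linux/wait.h>
 * for wait_event().
 */
static void example_wait_for_idle(struct kbase_device *kbdev)
{
	/* Sleep until the last active reference is dropped */
	wait_event(kbdev->pm.zero_active_count_wait,
			kbdev->pm.active_count == 0);

	mutex_lock(&kbdev->pm.lock);
	/* ... safe to power down / complete the suspend here ... */
	mutex_unlock(&kbdev->pm.lock);
}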