/*
 * Drop one reference on the GPU's active-context count.
 *
 * Decrements kbdev->pm.active_count under the active-count spinlock. When
 * the count reaches zero this was the last active context, so the GPU-idle
 * event is delivered to the power policy and the idle transition is
 * recorded for the metrics system.
 *
 * The idle handling must complete before the lock is dropped: otherwise a
 * concurrent kbase_pm_context_active() could deliver its ACTIVE event to
 * the policy/metrics code ahead of this IDLE event, confusing both.
 */
void kbase_pm_context_idle(kbase_device *kbdev)
{
	unsigned long irq_flags;
	int remaining;

	OSK_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.active_count_lock, irq_flags);

	remaining = --kbdev->pm.active_count;
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, remaining);
	OSK_ASSERT(remaining >= 0);

	if (remaining == 0) {
		/* No active contexts remain: notify the policy and metrics. */
		kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_IDLE);
		kbasep_pm_record_gpu_idle(kbdev);
	}

	/* Keep the lock held until the idle handling above has finished, so a
	 * racing kbase_pm_context_active() cannot get its ACTIVE message in
	 * ahead of our IDLE message. */
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, irq_flags);
}
void kbase_pm_context_idle(struct kbase_device *kbdev) { int c; int old_count; KBASE_DEBUG_ASSERT(kbdev != NULL); /* Trace timeline information about how long it took to handle the decision * to powerdown. Sometimes the event might be missed due to reading the * count outside of mutex, but this is necessary to get the trace timing * correct. */ old_count = kbdev->pm.active_count; if (old_count == 0) kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); mutex_lock(&kbdev->pm.lock); c = --kbdev->pm.active_count; KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c); KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c); KBASE_DEBUG_ASSERT(c >= 0); /* Trace the event being handled */ if (old_count == 0) kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE); if (c == 0) { /* Last context has gone idle */ kbase_pm_update_active(kbdev); #ifndef MALI_SEC_SEPERATED_UTILIZATION kbasep_pm_record_gpu_idle(kbdev); #endif /* Wake up anyone waiting for this to become 0 (e.g. suspend). The * waiters must synchronize with us by locking the pm.lock after * waiting */ wake_up(&kbdev->pm.zero_active_count_wait); } mutex_unlock(&kbdev->pm.lock); }