static enum hrtimer_restart kbasep_pm_do_shader_poweroff_callback(struct hrtimer *timer) { kbase_device *kbdev; unsigned long flags; kbdev = container_of(timer, kbase_device, pm.shader_poweroff_timer); spin_lock_irqsave(&kbdev->pm.power_change_lock, flags); if (kbdev->pm.shader_poweroff_pending != 0) { u64 prev_shader_state = kbdev->pm.desired_shader_state; mali_bool cores_are_available; kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending; kbdev->pm.shader_poweroff_pending = 0; if (prev_shader_state != kbdev->pm.desired_shader_state || kbdev->pm.ca_in_transition != MALI_FALSE) { KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START); cores_are_available = kbase_pm_check_transitions_nolock(kbdev); KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END); } /* Don't need 'cores_are_available', because we don't return anything */ CSTD_UNUSED(cores_are_available); } spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags); return HRTIMER_NORESTART; }
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev) { u64 desired_bitmap; mali_bool cores_are_available; lockdep_assert_held(&kbdev->pm.power_change_lock); if (kbdev->pm.pm_current_policy == NULL) return; desired_bitmap = kbdev->pm.pm_current_policy->get_core_mask(kbdev); desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev); /* Enable core 0 if tiler required, regardless of core availability */ if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0) desired_bitmap |= 1; if (kbdev->pm.desired_shader_state != desired_bitmap) KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, NULL, 0u, (u32)desired_bitmap); /* Are any cores being powered on? */ if (~kbdev->pm.desired_shader_state & desired_bitmap || kbdev->pm.ca_in_transition != MALI_FALSE) { /* Check if we are powering off any cores before updating shader state */ if (kbdev->pm.desired_shader_state & ~desired_bitmap) { /* Start timer to power off cores */ kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap); kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks; } kbdev->pm.desired_shader_state = desired_bitmap; /* If any cores are being powered on, transition immediately */ cores_are_available = kbase_pm_check_transitions_nolock(kbdev); } else if (kbdev->pm.desired_shader_state & ~desired_bitmap) { /* Start timer to power off cores */ kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap); kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks; } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && kbdev->pm.poweroff_timer_running) { /* If power policy is keeping cores on despite there being no active contexts * then disable poweroff timer as it isn't required */ kbdev->pm.poweroff_timer_running = MALI_FALSE; hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer); } /* Ensure timer does not power off wanted cores and make sure to power off unwanted cores */ if 
(kbdev->pm.shader_poweroff_pending != 0) { kbdev->pm.shader_poweroff_pending &= ~(kbdev->pm.desired_shader_state & desired_bitmap); if (kbdev->pm.shader_poweroff_pending == 0) kbdev->pm.shader_poweroff_pending_time = 0; } /* Don't need 'cores_are_available', because we don't return anything */ CSTD_UNUSED(cores_are_available); }
/*
 * kbasep_pm_do_gpu_poweroff_callback - Periodic poweroff timer tick.
 *
 * Runs in hrtimer (IRQ) context on every gpu_poweroff_time interval and
 * does two independent jobs:
 *  1. If a whole-GPU poweroff is pending, queue the poweroff worker.
 *  2. If a deferred shader-core poweroff is pending, count down one tick
 *     under pm.power_change_lock; when the count hits zero, remove the
 *     pending cores from the desired shader state and, if that changed
 *     anything (or a core-availability transition is in flight), kick
 *     the power transition state machine.
 *
 * Return: HRTIMER_RESTART always; the timer re-arms itself by
 * gpu_poweroff_time via hrtimer_add_expires().
 */
static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
{
	kbase_device *kbdev;

	kbdev = container_of(timer, kbase_device, pm.gpu_poweroff_timer);

	/* It is safe for this call to do nothing if the work item is already
	 * queued. The worker function will read the most up-to-date state of
	 * kbdev->pm.gpu_poweroff_pending under lock.
	 *
	 * If a state change occurs while the worker function is processing,
	 * this call will succeed as a work item can be requeued once it has
	 * started processing.
	 */
	if (kbdev->pm.gpu_poweroff_pending)
		queue_work(kbdev->pm.gpu_poweroff_wq, &kbdev->pm.gpu_poweroff_work);

	if (kbdev->pm.shader_poweroff_pending) {
		unsigned long flags;

		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

		/* Re-check under the lock: the unlocked test above is only an
		 * optimization and the pending mask may have been cleared
		 * before we acquired the lock */
		if (kbdev->pm.shader_poweroff_pending) {
			kbdev->pm.shader_poweroff_pending_time--;

			KBASE_DEBUG_ASSERT(kbdev->pm.shader_poweroff_pending_time >= 0);

			if (kbdev->pm.shader_poweroff_pending_time == 0) {
				u64 prev_shader_state = kbdev->pm.desired_shader_state;

				kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending;
				kbdev->pm.shader_poweroff_pending = 0;

				/* Only run the (expensive) transition check if
				 * the desired state actually changed, or a
				 * core-availability transition is in flight */
				if (prev_shader_state != kbdev->pm.desired_shader_state || kbdev->pm.ca_in_transition != MALI_FALSE) {
					mali_bool cores_are_available;

					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
					cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);

					/* Don't need 'cores_are_available', because we don't return anything */
					CSTD_UNUSED(cores_are_available);
				}
			}
		}

		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	}

	hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);

	return HRTIMER_RESTART;
}
/*
 * kbase_pm_do_poweroff - Force the GPU and all cores off immediately.
 *
 * Zeroes the desired shader state and all availability bitmaps, runs one
 * transition check to begin powering cores down, then turns the GPU clock
 * off. The statement order matters: the bitmaps must be cleared before
 * kbase_pm_check_transitions_nolock(), and the spinlock must be released
 * before kbase_pm_clock_off() is called.
 *
 * Caller must hold kbdev->pm.lock.
 */
void kbase_pm_do_poweroff(kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.desired_shader_state = 0;

	/* Force all cores to be unavailable, in the situation where
	 * transitions are in progress for some cores but not others,
	 * and kbase_pm_check_transitions_nolock can not immediately
	 * power off the cores */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);

	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait to reach the core's desired state, even if we're
	 * powering off the GPU itself too. It's safe to cut the power whilst
	 * they're transitioning to off, because the cores should be idle and all
	 * cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);

	/* Disable interrupts and turn the clock off */
	kbase_pm_clock_off(kbdev);
}