Example No. 1
static enum hrtimer_restart kbasep_pm_do_shader_poweroff_callback(struct hrtimer *timer)
{
	kbase_device *kbdev;
	unsigned long flags;

	kbdev = container_of(timer, kbase_device, pm.shader_poweroff_timer);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	if (kbdev->pm.shader_poweroff_pending != 0) {
		u64 prev_shader_state = kbdev->pm.desired_shader_state;
		mali_bool cores_are_available;

		kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending;
		kbdev->pm.shader_poweroff_pending = 0;

		if (prev_shader_state != kbdev->pm.desired_shader_state ||
				kbdev->pm.ca_in_transition != MALI_FALSE) {
			KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
			cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
			KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
		}

		/* Don't need 'cores_are_available', because we don't return anything */
		CSTD_UNUSED(cores_are_available);
	}

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	return HRTIMER_NORESTART;
}
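For context, the callback in Example No. 1 only runs once the hrtimer embedded in kbdev->pm has been initialised and armed elsewhere in the driver. The sketch below shows that wiring under the assumption of a relative, one-shot monotonic timer; the function name and the delay parameter are hypothetical, since the example does not show the real arming site.

static void example_arm_shader_poweroff_timer(kbase_device *kbdev,
					      unsigned long delay_ms)
{
	/* One-time setup: bind the callback to the embedded timer */
	hrtimer_init(&kbdev->pm.shader_poweroff_timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.shader_poweroff_timer.function =
		kbasep_pm_do_shader_poweroff_callback;

	/* Arm a single shot; the callback returns HRTIMER_NORESTART, so it
	 * must be re-armed for every deferred shader poweroff */
	hrtimer_start(&kbdev->pm.shader_poweroff_timer,
		      ktime_set(0, delay_ms * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}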
Example No. 2
void kbase_pm_do_poweron(kbase_device *kbdev, mali_bool is_resume)
{
	volatile u64 cores_powered;
	int cnt = 0xFFFF;
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev, is_resume);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */

	/* Poll until the lower six shader cores (mask 0x3f) report ready, or
	 * the retry budget is exhausted */
	do {
		cores_powered = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
		cnt--;
	} while (((cores_powered & 0x3f) != 0x3f) && cnt > 0);

	if (cnt == 0)
		KBASE_TRACE_ADD_EXYNOS(kbdev, LSI_POWER_ON_COUNT_EXPIRED, NULL, NULL, 0u, 0u);

	if (kbdev->pm.callback_power_on_post)
		kbdev->pm.callback_power_on_post(kbdev);
}
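The do/while loop above bounds the wait with a raw iteration counter (0xFFFF), so how long it actually waits depends on how quickly kbase_pm_get_ready_cores() returns. As a design alternative, the bound can be expressed in wall-clock time with ktime; this is a sketch only, not the driver's code, and the function name, mask and timeout parameters are assumptions.

static int example_wait_for_shader_cores(kbase_device *kbdev, u64 mask,
					 unsigned int timeout_ms)
{
	ktime_t end = ktime_add_ms(ktime_get(), timeout_ms);

	/* Spin until every core in 'mask' reports ready or the deadline passes */
	while ((kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER) & mask) != mask) {
		if (ktime_compare(ktime_get(), end) > 0)
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}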
Example No. 3
static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
{
	kbase_device *kbdev;

	kbdev = container_of(timer, kbase_device, pm.gpu_poweroff_timer);

	/* It is safe for this call to do nothing if the work item is already
	 * queued. The worker function will read the most up-to-date state of
	 * kbdev->pm.gpu_poweroff_pending under lock.
	 *
	 * If a state change occurs while the worker function is processing,
	 * this call will succeed as a work item can be requeued once it has
	 * started processing.
	 */
	if (kbdev->pm.gpu_poweroff_pending)
		queue_work(kbdev->pm.gpu_poweroff_wq, &kbdev->pm.gpu_poweroff_work);

	if (kbdev->pm.shader_poweroff_pending) {
		unsigned long flags;

		spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

		if (kbdev->pm.shader_poweroff_pending) {
			kbdev->pm.shader_poweroff_pending_time--;

			KBASE_DEBUG_ASSERT(kbdev->pm.shader_poweroff_pending_time >= 0);

			if (kbdev->pm.shader_poweroff_pending_time == 0) {
				u64 prev_shader_state = kbdev->pm.desired_shader_state;

				kbdev->pm.desired_shader_state &= ~kbdev->pm.shader_poweroff_pending;
				kbdev->pm.shader_poweroff_pending = 0;

				if (prev_shader_state != kbdev->pm.desired_shader_state ||
						kbdev->pm.ca_in_transition != MALI_FALSE) {
					mali_bool cores_are_available;

					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
					cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
					KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);

					/* Don't need 'cores_are_available', because we don't return anything */
					CSTD_UNUSED(cores_are_available);
				}
			}
		}

		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
	}

	hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
	return HRTIMER_RESTART;
}
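The comment in Example No. 3 relies on standard workqueue semantics: queue_work() returns without queuing if the item is already pending, and an item may be requeued once its worker has started executing. The worker it queues is not part of the example, so the sketch below is a hypothetical counterpart, assuming pm.lock is the mutex asserted by the poweron/poweroff examples.

static void example_gpu_poweroff_worker(struct work_struct *work)
{
	kbase_device *kbdev = container_of(work, kbase_device,
					   pm.gpu_poweroff_work);
	mali_bool do_poweroff = MALI_FALSE;

	/* Read the most up-to-date pending state under pm.lock, which is also
	 * required before calling kbase_pm_do_poweroff() (see Example No. 6) */
	mutex_lock(&kbdev->pm.lock);

	if (kbdev->pm.gpu_poweroff_pending) {
		kbdev->pm.gpu_poweroff_pending--;
		if (kbdev->pm.gpu_poweroff_pending == 0)
			do_poweroff = MALI_TRUE;
	}

	if (do_poweroff)
		kbase_pm_do_poweroff(kbdev);

	mutex_unlock(&kbdev->pm.lock);
}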
Example No. 4
void kbase_pm_do_poweron(kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off() */
	kbase_pm_clock_on(kbdev);

	/* Update core status as required by the policy */
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
	kbase_pm_update_cores_state(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway */
}
Example No. 5
STATIC INLINE void kbase_timeline_pm_cores_func(kbase_device *kbdev,
                                                kbase_pm_func_id func_id,
                                                kbase_pm_change_state state)
{
	int trace_code;
	KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
	KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) == state);

	trace_code = kbase_pm_change_state_trace_code[func_id][state];
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
}
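Example No. 5 is a thin dispatcher: after validating its arguments it looks the trace code up in a two-dimensional table indexed by PM function ID and change-state bitmask. The driver's real kbase_pm_change_state_trace_code table is not shown here, so the sketch below uses invented enumerators purely to illustrate the shape of that lookup.

/* Invented placeholders: one row per PM function, one column per
 * change-state bitmask, mirroring trace_code_table[func_id][state] */
enum example_pm_func_id {
	EXAMPLE_FUNC_REQUEST_CORES,
	EXAMPLE_FUNC_RELEASE_CORES,
	EXAMPLE_FUNC_COUNT
};

enum {
	EXAMPLE_STATE_SHADER = 1 << 0,
	EXAMPLE_STATE_TILER  = 1 << 1,
	EXAMPLE_STATE_MASK   = EXAMPLE_STATE_SHADER | EXAMPLE_STATE_TILER
};

static const int example_trace_code[EXAMPLE_FUNC_COUNT][EXAMPLE_STATE_MASK + 1] = {
	[EXAMPLE_FUNC_REQUEST_CORES] = {
		[EXAMPLE_STATE_SHADER]                       = 100, /* placeholder codes */
		[EXAMPLE_STATE_TILER]                        = 101,
		[EXAMPLE_STATE_SHADER | EXAMPLE_STATE_TILER] = 102,
	},
	[EXAMPLE_FUNC_RELEASE_CORES] = {
		[EXAMPLE_STATE_SHADER]                       = 200,
		[EXAMPLE_STATE_TILER]                        = 201,
		[EXAMPLE_STATE_SHADER | EXAMPLE_STATE_TILER] = 202,
	},
};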
Example No. 6
void kbase_pm_do_poweroff(kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool cores_are_available;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Force all cores off */
	kbdev->pm.desired_shader_state = 0;

	/* Force all cores to be unavailable, to cover the situation where
	 * transitions are in progress for some cores but not others, and
	 * kbase_pm_check_transitions_nolock cannot immediately power the
	 * cores off */
	kbdev->shader_available_bitmap = 0;
	kbdev->tiler_available_bitmap = 0;
	kbdev->l2_available_bitmap = 0;

	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
	cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
	KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
	/* Don't need 'cores_are_available', because we don't return anything */
	CSTD_UNUSED(cores_are_available);

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/* NOTE: We won't wait for the cores to reach their desired state, even
	 * if we're powering off the GPU itself too. It's safe to cut the power
	 * whilst they're transitioning to off, because the cores should be
	 * idle and all cache flushes should already have occurred */

	/* Consume any change-state events */
	kbase_timeline_pm_check_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
	/* Disable interrupts and turn the clock off */
	kbase_pm_clock_off(kbdev);
}
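Finally, kbase_pm_do_poweroff() only asserts that pm.lock is held; it does not take the lock itself. A hypothetical call site, assuming pm.lock is the mutex the lockdep assertion refers to, would look roughly like this:

static void example_power_down_gpu(kbase_device *kbdev)
{
	mutex_lock(&kbdev->pm.lock);

	/* Requests all cores off, consumes the resulting state-change event
	 * and finally gates the interrupts and clock */
	kbase_pm_do_poweroff(kbdev);

	mutex_unlock(&kbdev->pm.lock);
}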