Code example #1: kbase_pm_context_idle() (spinlock-based version)
void kbase_pm_context_idle(kbase_device *kbdev)
{
	unsigned long flags;
	int c;

	OSK_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);

	c = --kbdev->pm.active_count;

	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	OSK_ASSERT(c >= 0);
	
	if (c == 0)
	{
		/* Last context has gone idle */
		kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_IDLE);

		kbasep_pm_record_gpu_idle(kbdev);
	}

	/* We must wait for the above functions to finish (in the case c==0) before releasing the lock otherwise there is
	 * a race with another thread calling kbase_pm_context_active - in this case the IDLE message could be sent
	 * *after* the ACTIVE message causing the policy and metrics systems to become confused
	 */
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);
}
Code example #2: kbase_pm_context_active() (spinlock-based version)
void kbase_pm_context_active(kbase_device *kbdev)
{
	unsigned long flags;
	int c;

	OSK_ASSERT(kbdev != NULL);

	spin_lock_irqsave(&kbdev->pm.active_count_lock, flags);
	c = ++kbdev->pm.active_count;
	spin_unlock_irqrestore(&kbdev->pm.active_count_lock, flags);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	if (c == 1)
	{
		/* First context active */
		kbase_pm_send_event(kbdev, KBASE_PM_EVENT_GPU_ACTIVE);

		kbasep_pm_record_gpu_active(kbdev);
	}
	/* Synchronise with the power policy to ensure that the event has been noticed */
	kbase_pm_wait_for_no_outstanding_events(kbdev);

	kbase_pm_wait_for_power_up(kbdev);
}
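
The two functions above form a reference-counted pair: kbase_pm_context_active() blocks until power-up completes (via kbase_pm_wait_for_power_up()), and the matching kbase_pm_context_idle() drops the reference so the policy can power the GPU down once the count reaches zero. A minimal caller sketch follows; the function example_do_gpu_work() is hypothetical and only illustrates the expected pairing.

/* Hypothetical caller: keep the GPU powered for the duration of some work.
 * The first activation (count 0 -> 1) raises KBASE_PM_EVENT_GPU_ACTIVE;
 * the last idle (count 1 -> 0) raises KBASE_PM_EVENT_GPU_IDLE. */
static void example_do_gpu_work(kbase_device *kbdev)
{
	kbase_pm_context_active(kbdev);	/* returns once the GPU is powered up */

	/* ... submit or process GPU work here ... */

	kbase_pm_context_idle(kbdev);	/* drop the reference again */
}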
Code example #3: kbase_pm_context_active_handle_suspend() (mutex-based version)
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
{
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerup. Sometimes the event might be missed due to reading the count
	 * outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			mutex_unlock(&kbdev->pm.lock);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1) {
		/* First context active: Power on the GPU and any cores requested by
		 * the policy */
		kbase_pm_update_active(kbdev);

#ifndef MALI_SEC_SEPERATED_UTILIZATION
		kbasep_pm_record_gpu_active(kbdev);
#endif
	}

	mutex_unlock(&kbdev->pm.lock);

	return 0;
}
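
The return value matters here: 1 means the device is suspending and no reference was taken, 0 means the count was incremented and the GPU will be powered. Below is a sketch of a caller that must not re-activate the GPU during suspend; example_try_gpu_access() and its -EAGAIN return value are illustrative, not part of the driver.

/* Illustrative caller using KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE: if the
 * device is suspending, back off instead of taking a PM reference. */
static int example_try_gpu_access(struct kbase_device *kbdev)
{
	if (kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
		return -EAGAIN;	/* suspending: no reference taken, retry later */

	/* ... GPU is powered; do the work ... */

	kbase_pm_context_idle(kbdev);	/* release the reference taken above */
	return 0;
}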
Code example #4: kbase_pm_context_idle() (mutex-based version)
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to powerdown. Sometimes the event might be missed due to reading the
	 * count outside of mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	mutex_lock(&kbdev->pm.lock);

	c = --kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	KBASE_DEBUG_ASSERT(c >= 0);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_pm_update_active(kbdev);

#ifndef MALI_SEC_SEPERATED_UTILIZATION
		kbasep_pm_record_gpu_idle(kbdev);
#endif

		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
		 * waiters must synchronize with us by locking the pm.lock after
		 * waiting */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	mutex_unlock(&kbdev->pm.lock);
}
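
The wake_up() on zero_active_count_wait implies a waiter on the suspend path that sleeps until the active count reaches zero and then, as the comment requires, takes pm.lock to synchronize with the thread that performed the final idle. A simplified sketch of that waiter is shown below; it is not the driver's actual suspend implementation, and the function name example_wait_for_idle() is hypothetical.

/* Sketch of the suspend-side waiter hinted at by the wake_up() above. */
static void example_wait_for_idle(struct kbase_device *kbdev)
{
	/* Sleep until the last kbase_pm_context_idle() drops the count to 0. */
	wait_event(kbdev->pm.zero_active_count_wait,
		   kbdev->pm.active_count == 0);

	/* Lock pm.lock after waiting, so we are ordered after the idling
	 * thread's critical section before continuing the suspend. */
	mutex_lock(&kbdev->pm.lock);
	/* ... complete the power-down / suspend work here ... */
	mutex_unlock(&kbdev->pm.lock);
}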