Example #1
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
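
A note on the pairing above: kbase_pm_context_active() takes a reference on
the PM active count (powering the GPU on at the 0 -> 1 transition) and
kbase_pm_context_idle() drops one (allowing power-off at 1 -> 0). The resume
path therefore takes one initial reference, restores one reference per
keep_gpu_powered holder, and finally drops only its own initial reference.
A minimal standalone model of that arithmetic (all names here are
hypothetical simplifications, not the real driver code):

/* pm_refcount_model.c - sketch of the active/idle refcount pairing */
#include <stdio.h>

struct pm_model {
	int active_count;	/* stands in for kbdev->pm.active_count */
	int gpu_powered;
};

static void pm_active(struct pm_model *pm)
{
	if (pm->active_count++ == 0)
		pm->gpu_powered = 1;	/* 0 -> 1: power on */
}

static void pm_idle(struct pm_model *pm)
{
	if (--pm->active_count == 0)
		pm->gpu_powered = 0;	/* 1 -> 0: may power off */
}

int main(void)
{
	struct pm_model pm = { 0, 0 };
	int keep_gpu_powered_count = 2;	/* pretend two holders existed */
	int i;

	pm_active(&pm);				/* initial active call */
	for (i = 0; i < keep_gpu_powered_count; i++)
		pm_active(&pm);			/* restore held references */
	pm_idle(&pm);				/* matching idle call */

	/* Two references remain, so the GPU stays powered. */
	printf("active_count=%d powered=%d\n", pm.active_count, pm.gpu_powered);
	return 0;
}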
Example #2
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	kbase_pm_do_poweron(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

#if SLSI_INTEGRATION
	if (kbdev->hwcnt.prev_mm) {
		mutex_lock(&kbdev->hwcnt.mlock);

		if ((kbdev->hwcnt.enable_for_gpr == FALSE) &&
		    (kbdev->hwcnt.s_enable_for_utilization))
			kbdev->hwcnt.enable_for_utilization = TRUE;
		else
			kbdev->hwcnt.enable_for_utilization = FALSE;

		kbase_pm_policy_change(kbdev, 2);

		mutex_unlock(&kbdev->hwcnt.mlock);
	} else
#endif
	/* Re-enable instrumentation, if it was previously disabled. Note:
	 * when SLSI_INTEGRATION is set, this call is the body of the dangling
	 * "else" above, so it is skipped if the vendor hwcnt path ran. */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
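
One subtlety in the SLSI variant above: the "} else" sits before the #endif,
so when SLSI_INTEGRATION is set the seemingly unconditional
kbase_instr_hwcnt_resume() call becomes the else branch and only runs when
kbdev->hwcnt.prev_mm is NULL. A standalone sketch of that preprocessor
pattern (VENDOR_PATH and all functions here are hypothetical):

/* dangling_else.c - how a "} else" before #endif binds */
#include <stdio.h>

#define VENDOR_PATH 1

static int vendor_state = 1;

static void vendor_resume(void)  { puts("vendor path"); }
static void generic_resume(void) { puts("generic path"); }

int main(void)
{
#if VENDOR_PATH
	if (vendor_state) {
		vendor_resume();
	} else
#endif
	/* With VENDOR_PATH defined, this call is the else body and is
	 * skipped when vendor_state is nonzero; without VENDOR_PATH it
	 * always runs. */
	generic_resume();

	return 0;
}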
Example #3
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
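
For context, kbase_pm_resume() is invoked from the driver's system
suspend/resume hooks. A minimal sketch of how a platform driver might wire
it up through dev_pm_ops (the wrapper and ops names below are hypothetical;
the real hookup lives elsewhere in the driver):

/* Assumes the kbase headers that declare struct kbase_device and
 * kbase_pm_resume() are available. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm.h>

static int example_mali_resume(struct device *dev)	/* hypothetical */
{
	struct kbase_device *kbdev = dev_get_drvdata(dev);

	if (!kbdev)
		return -ENODEV;

	kbase_pm_resume(kbdev);	/* runs the sequence shown above */
	return 0;
}

static const struct dev_pm_ops example_mali_pm_ops = {	/* hypothetical */
	.resume = example_mali_resume,
};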