void kbase_pm_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend the job scheduler and associated components, so that they
	 * release all of their PM active count references */
	kbasep_js_suspend(kbdev);

	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by taking pm.lock inside
	 * kbase_hwaccess_pm_suspend() below */

	kbase_hwaccess_pm_suspend(kbdev);
}
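
/*
 * Illustrative sketch (not from this driver): how callers pair the PM
 * references that the wait_event() above depends on.
 * kbase_pm_context_active() and kbase_pm_context_idle() are real kbase
 * entry points; the wrapper function below is hypothetical.
 */
static void example_run_job_with_pm_ref(struct kbase_device *kbdev)
{
	/* Take a PM reference; powers the GPU up if needed. While this
	 * reference is held, pm.active_count cannot reach zero. */
	kbase_pm_context_active(kbdev);

	/* ... submit GPU work and wait for it to complete ... */

	/* Drop the reference; once the count reaches zero,
	 * pm.zero_active_count_wait is woken and the suspend path
	 * above can proceed. */
	kbase_pm_context_idle(kbdev);
}

/* MALI_SEC_INTEGRATION variant of kbase_pm_suspend() (vendor tree): adds
 * a vendor hwcnt suspend callback, a MALI_SEC_HWCNT guard around counter
 * suspension, cancellation of keep_gpu_powered holds, and an explicit
 * forced power-off. */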
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	KBASE_DEBUG_ASSERT(kbdev);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_prepare_suspend)
		kbdev->vendor_callbacks->hwcnt_prepare_suspend(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = MALI_TRUE;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend the job scheduler and associated components, so that they
	 * release all of their PM active count references */
	kbasep_js_suspend(kbdev);

#ifndef MALI_SEC_HWCNT
	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);
#endif

	/* Cancel outstanding keep_gpu_powered holds: each one pins a PM
	 * active reference that must be dropped before the active count
	 * can reach zero below */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_idle(kbdev);
	}

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	/* Force power off the GPU and all cores (regardless of policy), but
	 * only now that the PM active count has reached zero; powering off
	 * any earlier would risk cutting power prematurely */
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);
}
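
/*
 * Illustrative sketch only: the wake-up side that satisfies the
 * wait_event() calls in the suspend paths above. Field names match this
 * file; the body is a simplified assumption, not the driver's actual
 * kbase_pm_context_idle() (which also notifies the active power policy).
 */
static void example_pm_context_idle(struct kbase_device *kbdev)
{
	int c;

	mutex_lock(&kbdev->pm.lock);
	c = --kbdev->pm.active_count;
	if (c == 0) {
		/* Last PM reference dropped: wake anyone blocked in
		 * wait_event(pm.zero_active_count_wait, ...) */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}
	mutex_unlock(&kbdev->pm.lock);
}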