static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
{
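	/*
	 * Deferred GPU poweroff worker. pm.lock serializes the power
	 * transition; the pending request is then re-checked under
	 * power_change_lock, since the GPU may have become active again
	 * after this work was queued.
	 */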
	unsigned long flags;
	struct kbase_device *kbdev;
	mali_bool do_poweroff = MALI_FALSE;

	kbdev = container_of(data, struct kbase_device, pm.gpu_poweroff_work);

	mutex_lock(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	/* Only power off the GPU if a request is still pending and the policy
	 * no longer requires the GPU to be active */
	if (kbdev->pm.gpu_poweroff_pending != MALI_FALSE &&
	    kbdev->pm.pm_current_policy->get_core_active(kbdev) == MALI_FALSE)
		do_poweroff = MALI_TRUE;

	kbdev->pm.gpu_poweroff_pending = MALI_FALSE;

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
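	/* do_poweroff was latched under power_change_lock; act on it outside
	 * the IRQ spinlock, still serialized by pm.lock */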

	if (do_poweroff != MALI_FALSE) {
		/* Power off the GPU */
#if 0 /* SOC_NAME == 5430 */
		kbdev->pm.shader_poweroff_pending = 0;
#endif /* SOC_NAME */
		kbase_pm_do_poweroff(kbdev, MALI_FALSE);
	}

	mutex_unlock(&kbdev->pm.lock);
}
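/*
 * Halt power management: cancel any deferred poweroff and power the GPU
 * off. Takes pm.lock, so the caller must not already hold it.
 */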
void kbase_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, MALI_FALSE);
	mutex_unlock(&kbdev->pm.lock);
}
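/*
 * Prepare for a system suspend: stop the job scheduler and hardware
 * counter collection, drop all outstanding PM active references, wait
 * for the active count to reach zero, then force the GPU and all cores
 * off regardless of the current policy.
 */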
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* MALI_SEC_INTEGRATION: let the vendor hook prepare hardware counter
	 * collection for suspend before the generic sequence runs */
	if (kbdev->vendor_callbacks->hwcnt_prepare_suspend)
		kbdev->vendor_callbacks->hwcnt_prepare_suspend(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = MALI_TRUE;
	mutex_unlock(&kbdev->pm.lock);
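	/* kbase_pm_is_suspending() now returns MALI_TRUE, letting other
	 * threads see the suspend in progress before taking new PM
	 * references */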

	/* From now on, the active count will drop towards zero. It may rise
	 * briefly before falling again, but once it reaches zero it will stay
	 * there, guaranteeing that all PM references have been idled */

	/* Suspend the job scheduler and associated components, so that they
	 * release all of their PM active count references */
	kbasep_js_suspend(kbdev);

#ifndef MALI_SEC_HWCNT
	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);
#endif /* MALI_SEC_HWCNT */

	/* Cancel the keep_gpu_powered calls: drop one PM active reference for
	 * each outstanding keep_gpu_powered request */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_idle(kbdev);
	}

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	/* Force power off the GPU and all cores (regardless of policy), only after
	 * the PM active count reaches zero (otherwise, we risk turning it off
	 * prematurely) */
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);
}