void kbase_pm_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend the job scheduler and associated components, so that they
	 * release all their PM active count references */
	kbasep_js_suspend(kbdev);

	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	kbase_hwaccess_pm_suspend(kbdev);
}
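/*
 * For context, a sketch of the matching resume path, mirroring the suspend
 * sequence above in reverse. This is illustrative only: the counterparts
 * kbase_hwaccess_pm_resume(), kbase_instr_hwcnt_resume() and
 * kbasep_js_resume() are assumed to exist in this tree, as in mainline kbase.
 */
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* Re-initialise hardware and clear pm.suspending before anyone can
	 * take a new PM active reference */
	kbase_hwaccess_pm_resume(kbdev);

	/* Hold a PM reference while the scheduler and counters restart */
	kbase_pm_context_active(kbdev);

	/* Restart the counter collection suspended in kbase_pm_suspend() */
	kbase_instr_hwcnt_resume(kbdev);

	/* Allow the job scheduler to submit again */
	kbasep_js_resume(kbdev);

	/* Drop the reference taken above; the policy may now idle the GPU */
	kbase_pm_context_idle(kbdev);
}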
mali_error kbase_pm_powerup(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	unsigned int code;	/* MTK */
	unsigned int gpu_efuse;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, but don't enable IRQs yet: we are not ready to receive them */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	/* MTK: on Denali-1 (MT6735), the number of usable shader cores is read
	 * from the GPU efuse */
	code = mt_get_chip_hw_code();
	if (code == 0x321) {
		/* Read GPU efuse info */
		gpu_efuse = (get_devinfo_with_index(3) >> 7) & 0x01;
		if (gpu_efuse == 1)
			kbdev->pm.debug_core_mask = (u64)1;	/* 1 core */
		else
			kbdev->pm.debug_core_mask = (u64)3;	/* 2 cores */
	}

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* The power policy is now set up, so we are ready to receive IRQs; enable them */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, MALI_FALSE);
	mutex_unlock(&kbdev->pm.lock);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
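/*
 * Illustration only: how the debug_core_mask chosen above is typically
 * applied. The helper name is hypothetical; in real kbase trees the mask is
 * ANDed into the shader core mask by the power policy / core availability
 * code.
 */
static u64 kbase_pm_usable_shader_cores(struct kbase_device *kbdev)
{
	/* Only cores that are both physically present and permitted by the
	 * (efuse-derived) debug mask may be powered on */
	return kbdev->shader_present_bitmap & kbdev->pm.debug_core_mask;
}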
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
{
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to power up. The event might occasionally be missed because the count is
	 * read outside of the mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			mutex_unlock(&kbdev->pm.lock);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);

	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1) {
		/* First context active: Power on the GPU and any cores requested by
		 * the policy */
		kbase_pm_update_active(kbdev);

#ifndef MALI_SEC_SEPERATED_UTILIZATION
		kbasep_pm_record_gpu_active(kbdev);
#endif
	}

	mutex_unlock(&kbdev->pm.lock);

	return 0;
}
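/*
 * Usage sketch (hypothetical caller): take a PM active reference before
 * touching the GPU, refuse the request if a suspend is in progress, and drop
 * the reference with kbase_pm_context_idle() when the work is done.
 */
static int do_gpu_work(struct kbase_device *kbdev)
{
	if (kbase_pm_context_active_handle_suspend(kbdev,
			KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE))
		return -EAGAIN;	/* suspending; caller should retry later */

	/* The GPU is now powered (or powering up) and cannot be suspended
	 * until the matching idle call below */

	/* ... submit jobs / access GPU registers here ... */

	kbase_pm_context_idle(kbdev);
	return 0;
}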
mali_error kbase_pm_powerup(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* MALI_SEC_INTEGRATION */
	/* During GPU initialization, vendor-specific power-on debug logging is
	 * configured by calling set_poweron_dbg(FALSE) */
	if (kbdev->vendor_callbacks->set_poweron_dbg)
		kbdev->vendor_callbacks->set_poweron_dbg(FALSE);

	/* Power up the GPU, but don't enable IRQs yet: we are not ready to receive them */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* The power policy is now set up, so we are ready to receive IRQs; enable them */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, MALI_FALSE);
	mutex_unlock(&kbdev->pm.lock);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_init)
		kbdev->vendor_callbacks->hwcnt_init(kbdev);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
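/*
 * Sketch of the vendor hooks used above (MALI_SEC_INTEGRATION). The struct
 * layout and parameter types are assumptions inferred from the call sites in
 * this file; each pointer is optional and must be NULL-checked before use,
 * as done above.
 */
struct kbase_vendor_callbacks {
	void (*set_poweron_dbg)(int enable);	/* toggle power-on debug logging */
	void (*hwcnt_init)(struct kbase_device *kbdev);	/* set up vendor HW counters */
	void (*hwcnt_prepare_suspend)(struct kbase_device *kbdev); /* quiesce counters before suspend */
};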
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	KBASE_DEBUG_ASSERT(kbdev);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_prepare_suspend)
		kbdev->vendor_callbacks->hwcnt_prepare_suspend(kbdev);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = MALI_TRUE;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend the job scheduler and associated components, so that they
	 * release all their PM active count references */
	kbasep_js_suspend(kbdev);

#ifndef MALI_SEC_HWCNT
	/* Suspend any counter collection that might be happening */
	kbase_instr_hwcnt_suspend(kbdev);
#endif

	/* Cancel the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_idle(kbdev);
	}

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	/* Force power off the GPU and all cores (regardless of policy), only after
	 * the PM active count reaches zero (otherwise, we risk turning it off
	 * prematurely) */
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_cancel_deferred_poweroff(kbdev);
	kbase_pm_do_poweroff(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);
}
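/*
 * Sketch (hypothetical helper) of the pairing cancelled by the loop above:
 * each keep_gpu_powered holder bumps keep_gpu_powered_count together with a
 * PM active reference, which is why suspend can release them all with
 * repeated kbase_pm_context_idle() calls.
 */
static void kbase_keep_gpu_powered_get(struct kbase_device *kbdev)
{
	kbase_pm_context_active(kbdev);
	atomic_inc(&kbdev->keep_gpu_powered_count);
}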
mali_error kbase_pm_powerup(kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, but don't enable IRQs yet: we are not ready to receive them */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* The power policy is now set up, so we are ready to receive IRQs; enable them */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev);
	mutex_unlock(&kbdev->pm.lock);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
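/*
 * Sketch: the gpu_cycle_counter_requests refcount zeroed above pairs
 * request/release calls so that the cycle counter runs only while at least
 * one user needs it. The request/release names follow the standard kbase
 * API; treat the body as illustrative.
 */
static void sample_gpu_cycle_counter(struct kbase_device *kbdev)
{
	/* The 0 -> 1 transition issues GPU_COMMAND_CYCLE_COUNT_START */
	kbase_pm_request_gpu_cycle_counter(kbdev);

	/* ... read the GPU cycle count registers here ... */

	/* The 1 -> 0 transition issues GPU_COMMAND_CYCLE_COUNT_STOP, matching
	 * the stop written in the initialization path above */
	kbase_pm_release_gpu_cycle_counter(kbdev);
}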