Example #1
void kbase_pm_resume(struct kbase_device *kbdev)
{
	int nr_keep_gpu_powered_ctxs;

	/* MUST happen before any pm_context_active calls occur */
	mutex_lock(&kbdev->pm.lock);
	kbdev->pm.suspending = MALI_FALSE;
	kbase_pm_do_poweron(kbdev, MALI_TRUE);
	mutex_unlock(&kbdev->pm.lock);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Restore the keep_gpu_powered calls */
	for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
		 nr_keep_gpu_powered_ctxs > 0;
		 --nr_keep_gpu_powered_ctxs) {
		kbase_pm_context_active(kbdev);
	}

	/* Re-enable instrumentation, if it was previously disabled */
	kbase_instr_hwcnt_resume(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);
}
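kbase_pm_context_active() and kbase_pm_context_idle() form a reference count on pm.active_count: the first active call powers the GPU/cores on, and the matching idle call lets the policy power them off again. Below is a minimal sketch of how a caller would bracket GPU work with this pair; submit_with_pm_ref() and do_gpu_work() are hypothetical names, and only the active/idle calls come from the listing above.

/* Sketch only: bracket a unit of GPU work with the PM reference count.
 * do_gpu_work() is a placeholder, not a real driver function. */
static void submit_with_pm_ref(struct kbase_device *kbdev)
{
	/* First reference powers the GPU/cores on if they were off */
	kbase_pm_context_active(kbdev);

	do_gpu_work(kbdev);

	/* Matching idle call; the policy may power off once the count
	 * drops to zero */
	kbase_pm_context_idle(kbdev);
}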
void kbase_pm_update_active(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool active;

	lockdep_assert_held(&kbdev->pm.lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(kbdev->pm.pm_current_policy);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	active = kbdev->pm.pm_current_policy->get_core_active(kbdev);

	if (active != MALI_FALSE) {
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

		if (kbdev->pm.gpu_poweroff_pending) {
			/* Cancel any pending power off request */
			kbdev->pm.gpu_poweroff_pending = 0;

			/* If a request was pending then the GPU was still powered, so no need to continue */
			return;
		}

		if (!kbdev->pm.poweroff_timer_running && !kbdev->pm.gpu_powered) {
			kbdev->pm.poweroff_timer_running = MALI_TRUE;
			hrtimer_start(&kbdev->pm.gpu_poweroff_timer, kbdev->pm.gpu_poweroff_time, HRTIMER_MODE_REL);
		}

		/* Power on the GPU and any cores requested by the policy */
		kbase_pm_do_poweron(kbdev, MALI_FALSE);
	} else {
		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active */
		KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);

		if (kbdev->pm.shader_poweroff_pending) {
			kbdev->pm.shader_poweroff_pending = 0;
			kbdev->pm.shader_poweroff_pending_time = 0;
		}

		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

		/* Request power off */
		if (kbdev->pm.gpu_powered) {
			kbdev->pm.gpu_poweroff_pending = kbdev->pm.poweroff_gpu_ticks;
			if (!kbdev->pm.poweroff_timer_running) {
				/* Start the timer if it is not already running (e.g. if the power policy
				 * has been changed from always_on to something else). This ensures the
				 * GPU is actually powered off. */
				kbdev->pm.poweroff_timer_running = MALI_TRUE;
				hrtimer_start(&kbdev->pm.gpu_poweroff_timer, kbdev->pm.gpu_poweroff_time, HRTIMER_MODE_REL);
			}
		}
	}
}
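hrtimer_start() above only arms pm.gpu_poweroff_timer; the power-off itself happens in the timer's callback, which is not part of this listing. The following is a hedged sketch of the usual hrtimer callback shape such a timer would use; the callback name and its body are assumptions, and only the pm.gpu_poweroff_* fields come from the code above.

#include <linux/hrtimer.h>

/* Sketch only: conventional hrtimer callback shape for
 * pm.gpu_poweroff_timer; the real driver's logic is not shown here. */
static enum hrtimer_restart example_gpu_poweroff_callback(struct hrtimer *timer)
{
	struct kbase_device *kbdev =
		container_of(timer, struct kbase_device, pm.gpu_poweroff_timer);

	/* ... count down pm.gpu_poweroff_pending and power off when it
	 * reaches zero (assumed behaviour) ... */

	/* Re-arm so the countdown continues on the next tick */
	hrtimer_forward_now(timer, kbdev->pm.gpu_poweroff_time);
	return HRTIMER_RESTART;
}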
mali_error kbase_pm_powerup(struct kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* MALI_SEC_INTEGRATION */
	/* During GPU initialization, the vendor's GPU debug log output is
	 * suppressed by the set_poweron_dbg(FALSE) call */
	if (kbdev->vendor_callbacks->set_poweron_dbg)
		kbdev->vendor_callbacks->set_poweron_dbg(FALSE);

	/* Power up the GPU, but don't enable IRQs yet; we are not ready to receive them. */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* We are now ready to receive IRQs as the power policy is set up, so enable them. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev, MALI_FALSE);
	mutex_unlock(&kbdev->pm.lock);

	/* MALI_SEC_INTEGRATION */
	if (kbdev->vendor_callbacks->hwcnt_init)
		kbdev->vendor_callbacks->hwcnt_init(kbdev);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
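Since kbase_pm_powerup() returns a mali_error, callers on the probe/init path must check and propagate it. A minimal sketch, assuming a hypothetical example_device_init() wrapper; only the kbase_pm_powerup() call and MALI_ERROR_NONE come from the listing.

/* Sketch only: hypothetical init-path caller of kbase_pm_powerup(). */
static mali_error example_device_init(struct kbase_device *kbdev)
{
	mali_error err;

	err = kbase_pm_powerup(kbdev);
	if (err != MALI_ERROR_NONE)
		return err;	/* HW init failed; propagate to the probe path */

	/* ... continue with job scheduler and context initialization ... */
	return MALI_ERROR_NONE;
}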
Example #4
mali_error kbase_pm_powerup(kbase_device *kbdev)
{
	unsigned long flags;
	mali_error ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_lock(&kbdev->pm.lock);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, but don't enable IRQs yet; we are not ready to receive them. */
	ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
	if (ret != MALI_ERROR_NONE) {
		mutex_unlock(&kbdev->pm.lock);
		return ret;
	}

	kbasep_pm_read_present_cores(kbdev);

	kbdev->pm.debug_core_mask = kbdev->shader_present_bitmap;

	/* Pretend the GPU is active to prevent a power policy turning the GPU cores off */
	kbdev->pm.active_count = 1;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
	/* Ensure cycle counter is off */
	kbdev->pm.gpu_cycle_counter_requests = 0;
	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* We are now ready to receive IRQs as the power policy is set up, so enable them. */
#ifdef CONFIG_MALI_DEBUG
	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
	kbdev->pm.driver_ready_for_irqs = MALI_TRUE;
	spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
#endif
	kbase_pm_enable_interrupts(kbdev);

	/* Turn on the GPU and any cores needed by the policy */
	kbase_pm_do_poweron(kbdev);
	mutex_unlock(&kbdev->pm.lock);

	/* Idle the GPU and/or cores, if the policy wants it to */
	kbase_pm_context_idle(kbdev);

	return MALI_ERROR_NONE;
}
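This older variant differs from Example #1 in two ways: kbase_pm_do_poweron() takes no resume flag, and the cycle counter is stopped with an explicit register write under gpu_cycle_counter_requests_lock. The counter is reference-counted through pm.gpu_cycle_counter_requests; below is a hedged sketch of the matching request-side helper. The function name and GPU_COMMAND_CYCLE_COUNT_START follow mainline kbase naming but should be treated as assumptions here.

/* Sketch only: reference-counted cycle-counter request that mirrors the
 * locked CYCLE_COUNT_STOP write in the listing above. */
static void example_request_gpu_cycle_counter(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);

	/* Only the first requester issues the START command; later
	 * requesters just bump the count */
	if (++kbdev->pm.gpu_cycle_counter_requests == 1)
		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
				GPU_COMMAND_CYCLE_COUNT_START, NULL);

	spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
}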
Example #5
void kbase_pm_update_active(kbase_device *kbdev)
{
	unsigned long flags;
	mali_bool active;

	lockdep_assert_held(&kbdev->pm.lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(kbdev->pm.pm_current_policy);

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	active = kbdev->pm.pm_current_policy->get_core_active(kbdev);

	if (active != MALI_FALSE) {
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

		if (kbdev->pm.gpu_poweroff_pending == MALI_TRUE) {
			/* Cancel any pending power off request */
			kbdev->pm.gpu_poweroff_pending = MALI_FALSE;

			/* If a request was pending then the GPU was still powered, so no need to continue */
			return;
		}

		/* Power on the GPU and any cores requested by the policy */
		kbase_pm_do_poweron(kbdev, MALI_FALSE);
	} else {
		mali_bool cancel_timer = MALI_FALSE;

		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active */
		KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);

		if (kbdev->pm.shader_poweroff_pending) {
			cancel_timer = MALI_TRUE;
			kbdev->pm.shader_poweroff_pending = 0;
		}

		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

		if (cancel_timer)
			hrtimer_cancel(&kbdev->pm.shader_poweroff_timer);

		/* Request power off */
		kbdev->pm.gpu_poweroff_pending = MALI_TRUE;
		queue_work(kbdev->pm.gpu_poweroff_wq, &kbdev->pm.gpu_poweroff_work);
	}
}
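Unlike Example #1, this variant defers the power-off to a workqueue item (pm.gpu_poweroff_work on pm.gpu_poweroff_wq) rather than an hrtimer countdown. The worker is not part of the listing; the following is a hedged sketch of the conventional work-item shape it would take. The worker name, the re-check of active_count, and kbase_pm_do_poweroff() are assumptions.

#include <linux/workqueue.h>

/* Sketch only: conventional worker shape for pm.gpu_poweroff_work;
 * the real driver's logic is not shown in these listings. */
static void example_gpu_poweroff_worker(struct work_struct *data)
{
	struct kbase_device *kbdev =
		container_of(data, struct kbase_device, pm.gpu_poweroff_work);

	mutex_lock(&kbdev->pm.lock);

	/* Power off only if the request is still pending and no context
	 * became active in the meantime (assumed check) */
	if (kbdev->pm.gpu_poweroff_pending && kbdev->pm.active_count == 0)
		kbase_pm_do_poweroff(kbdev);	/* assumed counterpart of do_poweron */

	kbdev->pm.gpu_poweroff_pending = MALI_FALSE;
	mutex_unlock(&kbdev->pm.lock);
}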