Code example #1
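/* Start or cancel the scheduling timer so that it matches the new context
 * count. The caller must hold runpool_mutex. */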
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
	unsigned long flags;

	lockdep_assert_held(&js_devdata->runpool_mutex);

	if (!timer_callback_should_run(kbdev)) {
		/* Take spinlock to force synchronisation with timer */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
		backend->timer_running = false;
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		/* From now on, the return value of timer_callback_should_run()
		 * will also cause the timer not to requeue itself. Its return
		 * value cannot change, because it depends on variables updated
		 * with the runpool_mutex held, which the caller of this
		 * function must also hold */
		hrtimer_cancel(&backend->scheduling_timer);
	}

	if (timer_callback_should_run(kbdev) && !backend->timer_running) {
		/* Take spinlock to force synchronisation with timer */
		spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
		backend->timer_running = true;
		spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
		hrtimer_start(&backend->scheduling_timer,
			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
							HRTIMER_MODE_REL);

		KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u,
									0u);
	}
}
Code example #2
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

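	/* High-priority workqueue for deferred GPU power-off; max_active of
	 * 1 keeps the power-off work serialized */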
	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!kbdev->pm.gpu_poweroff_wq)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

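	/* Relative hrtimer on the monotonic clock that drives the GPU
	 * power-off callback */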
	hrtimer_init(&kbdev->pm.gpu_poweroff_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	kbdev->pm.gpu_poweroff_timer.function = kbasep_pm_do_gpu_poweroff_callback;

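	/* Select the first entry in policy_list as the current policy and
	 * initialize it */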
	kbdev->pm.pm_current_policy = policy_list[0];

	kbdev->pm.pm_current_policy->init(kbdev);

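	/* Cache the power-off delay and tick counts from the platform
	 * configuration */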
	kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(
			kbasep_get_config_value(kbdev, kbdev->config_attributes,
				KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS));

	kbdev->pm.poweroff_shader_ticks = kbasep_get_config_value(kbdev,
			kbdev->config_attributes,
			KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_SHADER);
	kbdev->pm.poweroff_gpu_ticks = kbasep_get_config_value(kbdev,
			kbdev->config_attributes,
			KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU);

	return MALI_ERROR_NONE;
}
Code example #3
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

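	/* High-priority workqueue for deferred GPU power-off work */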
	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff",
			WQ_HIGHPRI, 1);
	if (!kbdev->pm.gpu_poweroff_wq)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

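	/* Relative hrtimer on the monotonic clock that drives the shader
	 * power-off callback */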
	hrtimer_init(&kbdev->pm.shader_poweroff_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	kbdev->pm.shader_poweroff_timer.function =
			kbasep_pm_do_shader_poweroff_callback;

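	/* Select the third entry in policy_list as the current policy and
	 * initialize it */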
	kbdev->pm.pm_current_policy = policy_list[2];

	kbdev->pm.pm_current_policy->init(kbdev);

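	/* The configured shader power-off time is multiplied by 1000 before
	 * the nanosecond conversion, so the attribute is presumably expressed
	 * in microseconds */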
	kbdev->pm.shader_poweroff_time = HR_TIMER_DELAY_NSEC(
			kbasep_get_config_value(kbdev, kbdev->config_attributes,
				KBASE_CONFIG_ATTR_PM_SHADER_POWEROFF_TIME) * 1000);

#if SLSI_INTEGRATION
	kbdev->hwcnt.prev_policy = policy_list[2];
#endif

	return MALI_ERROR_NONE;
}
Code example #4
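/* Scheduling-timer tick: inspect the head atom on each job slot and
 * soft-stop, hard-stop or flag a GPU reset once the atom's tick count
 * reaches the corresponding threshold. */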
static enum hrtimer_restart timer_callback(struct hrtimer *timer)
{
	unsigned long flags;
	struct kbase_device *kbdev;
	struct kbasep_js_device_data *js_devdata;
	struct kbase_backend_data *backend;
	int s;
	bool reset_needed = false;

	KBASE_DEBUG_ASSERT(timer != NULL);

	backend = container_of(timer, struct kbase_backend_data,
							scheduling_timer);
	kbdev = container_of(backend, struct kbase_device, hwaccess.backend);
	js_devdata = &kbdev->js_data;

	/* Loop through the slots */
	spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
	for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
		struct kbase_jd_atom *atom = NULL;

		if (kbase_backend_nr_atoms_on_slot(kbdev, s) > 0) {
			atom = kbase_gpu_inspect(kbdev, s, 0);
			KBASE_DEBUG_ASSERT(atom != NULL);
		}

		if (atom != NULL) {
			/* The current version of the model doesn't support
			 * Soft-Stop */
			if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
				u32 ticks = atom->sched_info.cfs.ticks++;

#if !CINSTR_DUMPING_ENABLED
				u32 soft_stop_ticks, hard_stop_ticks,
								gpu_reset_ticks;
				if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
					soft_stop_ticks =
						js_devdata->soft_stop_ticks_cl;
					hard_stop_ticks =
						js_devdata->hard_stop_ticks_cl;
					gpu_reset_ticks =
						js_devdata->gpu_reset_ticks_cl;
				} else {
					soft_stop_ticks =
						js_devdata->soft_stop_ticks;
					hard_stop_ticks =
						js_devdata->hard_stop_ticks_ss;
					gpu_reset_ticks =
						js_devdata->gpu_reset_ticks_ss;
				}

				/* Job is Soft-Stoppable */
				if (ticks == soft_stop_ticks) {
					int disjoint_threshold =
		KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
					u32 softstop_flags = 0u;
					/* Job has been scheduled for at least
					 * js_devdata->soft_stop_ticks ticks.
					 * Soft stop the slot so we can run
					 * other jobs.
					 */
					dev_dbg(kbdev->dev, "Soft-stop");
#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
					/* nr_user_contexts_running is updated
					 * with the runpool_mutex, but we can't
					 * take that here.
					 *
					 * However, if it's about to be
					 * increased then the new context can't
					 * run any jobs until they take the
					 * runpool_irq lock, so it's OK to
					 * observe the older value.
					 *
					 * Similarly, if it's about to be
					 * decreased, the last job from another
					 * context has already finished, so it's
					 * not too bad that we observe the older
					 * value and register a disjoint event
					 * when we try soft-stopping */
					if (js_devdata->nr_user_contexts_running
							>= disjoint_threshold)
						softstop_flags |=
						JS_COMMAND_SW_CAUSES_DISJOINT;

					kbase_job_slot_softstop_swflags(kbdev,
						s, atom, softstop_flags);
#endif
				} else if (ticks == hard_stop_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->hard_stop_ticks_ss ticks.
					 * It should have been soft-stopped by
					 * now. Hard stop the slot.
					 */
#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
					int ms =
						js_devdata->scheduling_period_ns
								/ 1000000u;
					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
							(unsigned long)ticks,
							(unsigned long)ms);
					kbase_job_slot_hardstop(atom->kctx, s,
									atom);
#endif
				} else if (ticks == gpu_reset_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->gpu_reset_ticks_ss ticks.
					 * It should have left the GPU by now.
					 * Signal that the GPU needs to be
					 * reset.
					 */
					reset_needed = true;
				}
#else				/* !CINSTR_DUMPING_ENABLED */
				/* NOTE: During CINSTR_DUMPING_ENABLED, we use
				 * the alternate timeouts, which makes the hard-
				 * stop and GPU reset timeout much longer. We
				 * also ensure that we don't soft-stop at all.
				 */
				if (ticks == js_devdata->soft_stop_ticks) {
					/* Job has been scheduled for at least
					 * js_devdata->soft_stop_ticks. We do
					 * not soft-stop during
					 * CINSTR_DUMPING_ENABLED, however.
					 */
					dev_dbg(kbdev->dev, "Soft-stop");
				} else if (ticks ==
					js_devdata->hard_stop_ticks_dumping) {
					/* Job has been scheduled for at least
					 * js_devdata->hard_stop_ticks_dumping
					 * ticks. Hard stop the slot.
					 */
#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
					int ms =
						js_devdata->scheduling_period_ns
								/ 1000000u;
					dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)",
							(unsigned long)ticks,
							(unsigned long)ms);
					kbase_job_slot_hardstop(atom->kctx, s,
									atom);
#endif
				} else if (ticks ==
					js_devdata->gpu_reset_ticks_dumping) {
					/* Job has been scheduled for at least
					 * js_devdata->gpu_reset_ticks_dumping
					 * ticks. It should have left the GPU by
					 * now. Signal that the GPU needs to be
					 * reset.
					 */
					reset_needed = true;
				}
#endif				/* !CINSTR_DUMPING_ENABLED */
			}
		}
	}
#if KBASE_GPU_RESET_EN
	if (reset_needed) {
		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issuing GPU soft-reset to resolve.");

		if (kbase_prepare_to_reset_gpu_locked(kbdev))
			kbase_reset_gpu_locked(kbdev);
	}
#endif /* KBASE_GPU_RESET_EN */
	/* The timer is re-issued if there are contexts in the run-pool */

	if (backend->timer_running)
		hrtimer_start(&backend->scheduling_timer,
			HR_TIMER_DELAY_NSEC(js_devdata->scheduling_period_ns),
			HRTIMER_MODE_REL);

	spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

	return HRTIMER_NORESTART;
}