int __intel_context_do_pin(struct intel_context *ce)
{
        int err;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (likely(!atomic_read(&ce->pin_count))) {
                intel_wakeref_t wakeref;

                err = 0;
                with_intel_runtime_pm(ce->engine->i915, wakeref)
                        err = ce->ops->pin(ce);
                if (err)
                        goto err;

                i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
                intel_context_get(ce);

                smp_mb__before_atomic(); /* flush pin before it is visible */
        }

        atomic_inc(&ce->pin_count);
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        mutex_unlock(&ce->pin_mutex);
        return 0;

err:
        mutex_unlock(&ce->pin_mutex);
        return err;
}
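The function above is the slow path; callers normally sit behind a lock-free fast path that only falls through to it for the first pin. A minimal sketch of such a wrapper, assuming the usual atomic_inc_not_zero() idiom (the name intel_context_pin and its exact placement are assumptions, not taken from the listing):

/*
 * Sketch of an assumed fast-path wrapper: bump the pin count without
 * taking pin_mutex when the context is already pinned, and fall back to
 * __intel_context_do_pin() only when pin_count is still zero.
 */
static inline int intel_context_pin(struct intel_context *ce)
{
        if (likely(atomic_inc_not_zero(&ce->pin_count)))
                return 0;

        return __intel_context_do_pin(ce);
}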
void intel_uc_suspend(struct drm_i915_private *i915)
{
        struct intel_guc *guc = &i915->guc;
        intel_wakeref_t wakeref;

        if (!intel_guc_is_loaded(guc))
                return;

        with_intel_runtime_pm(i915, wakeref)
                intel_uc_runtime_suspend(i915);
}
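Every snippet in this listing leans on with_intel_runtime_pm() to keep the device awake only for the single statement it guards. A rough sketch of the macro, assuming it follows the common single-iteration for-loop shape around intel_runtime_pm_get()/intel_runtime_pm_put():

/*
 * Assumed shape of the wrapper: grab a runtime PM wakeref, run the guarded
 * statement exactly once, then release the wakeref. The wakeref cookie
 * doubles as the loop condition, which is why callers such as
 * __intel_context_do_pin() pre-initialise the result before the block.
 */
#define with_intel_runtime_pm(i915, wf) \
        for ((wf) = intel_runtime_pm_get(i915); (wf); \
             intel_runtime_pm_put((i915), (wf)), (wf) = 0)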
static void guc_log_capture_logs(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        intel_wakeref_t wakeref;

        guc_read_update_log_buffer(log);

        /*
         * Generally device is expected to be active only at this
         * time, so get/put should be really quick.
         */
        with_intel_runtime_pm(dev_priv, wakeref)
                guc_action_flush_log_complete(guc);
}
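guc_log_capture_logs() is also what the deferred relay path would end up calling; the flush_work(&log->relay.flush_work) in intel_guc_log_relay_flush() below points at such a worker. A sketch of what that worker could look like (the function name capture_logs_work is an assumption; only the relay.flush_work member is taken from the listing):

/*
 * Sketch of an assumed deferred path: the log-buffer flush notification
 * only queues work, and the worker performs the actual capture.
 */
static void capture_logs_work(struct work_struct *work)
{
        struct intel_guc_log *log =
                container_of(work, struct intel_guc_log, relay.flush_work);

        guc_log_capture_logs(log);
}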
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_i915(guc);
        intel_wakeref_t wakeref;

        /*
         * Before initiating the forceful flush, wait for any pending/ongoing
         * flush to complete otherwise forceful flush may not actually happen.
         */
        flush_work(&log->relay.flush_work);

        with_intel_runtime_pm(i915, wakeref)
                guc_action_flush_log(guc);

        /* GuC would have updated log buffer by now, so capture it */
        guc_log_capture_logs(log);
}
int intel_reset_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_global_reset), /* attempt to recover GPU first */
                SUBTEST(igt_wedged_reset),
                SUBTEST(igt_atomic_reset),
        };
        intel_wakeref_t wakeref;
        int err = 0;

        if (!intel_has_gpu_reset(i915))
                return 0;

        if (i915_terminally_wedged(i915))
                return -EIO; /* we're long past hope of a successful reset */

        with_intel_runtime_pm(i915, wakeref)
                err = i915_subtests(tests, i915);

        return err;
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        intel_wakeref_t wakeref;
        int ret = 0;

        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);

        /*
         * GuC is recognizing log levels starting from 0 to max, we're using 0
         * as indication that logging should be disabled.
         */
        if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
                return -EINVAL;

        mutex_lock(&dev_priv->drm.struct_mutex);

        if (log->level == level)
                goto out_unlock;

        with_intel_runtime_pm(dev_priv, wakeref)
                ret = guc_action_control_log(guc,
                                             GUC_LOG_LEVEL_IS_VERBOSE(level),
                                             GUC_LOG_LEVEL_IS_ENABLED(level),
                                             GUC_LOG_LEVEL_TO_VERBOSITY(level));
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
        }

        log->level = level;

out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        return ret;
}
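A typical consumer of intel_guc_log_set_level() is a debugfs-style setter that simply forwards the user-supplied value, leaving range checking to the function above. A hypothetical sketch (the wrapper name and the layout of the private data cookie are assumptions):

/*
 * Hypothetical debugfs-style setter; assumes the data cookie is the
 * drm_i915_private and that the GuC log state lives at i915->guc.log.
 * Out-of-range values are rejected by intel_guc_log_set_level() itself.
 */
static int guc_log_level_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        return intel_guc_log_set_level(&i915->guc.log, val);
}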