static u32 calc_residency(struct drm_device *dev, i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

        if (!intel_enable_rc6(dev))
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                units = 1;
                div = dev_priv->czclk_freq;

                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
        } else if (IS_BROXTON(dev)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }

        raw_time = I915_READ(reg) * units;
        ret = DIV_ROUND_UP_ULL(raw_time, div);

        intel_runtime_pm_put(dev_priv);

        return ret;
}

static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */

        if (!intel_enable_rc6(dev))
                return 0;

        raw_time = I915_READ(reg) * 128ULL;
        return DIV_ROUND_UP_ULL(raw_time, 100000);
}

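/*
 * Illustrative sketch, not driver code: both calc_residency() variants
 * above report milliseconds (the corresponding sysfs files are named
 * rc6_residency_ms and friends). In the common case one counter tick is
 * 1.28us, so count * 128 / 100000 == count * 1.28us expressed in ms;
 * VLV/CHV divide by the CZ clock frequency (kHz) instead, and BXT uses
 * 1200 ticks per ms (833.33ns per tick). The standalone model below
 * mirrors the common-case math, including the rounding behaviour of
 * DIV_ROUND_UP_ULL(); the function name is hypothetical.
 */
#include <stdint.h>

static uint32_t residency_ms_sketch(uint32_t raw_count)
{
        /* a 32-bit count * 128 can overflow 32 bits, hence the u64 widening */
        uint64_t raw_time = (uint64_t)raw_count * 128ULL;

        /* round up, matching DIV_ROUND_UP_ULL(raw_time, 100000) */
        return (uint32_t)((raw_time + 100000ULL - 1ULL) / 100000ULL);
}

/* e.g. 1000000 ticks * 1.28us = 1.28s, so residency_ms_sketch() yields 1280 */
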
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
        if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bits 0 and 1 are for the Render and Media domains, respectively */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

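/*
 * A hedged note on the send above, rather than a verbatim excerpt from the
 * GuC interface spec: in the host-to-GuC action protocol the first u32 of
 * the array is the action opcode (INTEL_GUC_ACTION_SAMPLE_FORCEWAKE here)
 * and the remaining words are its parameters, so action[1] carries the
 * forcewake domain bitmask the firmware should sample. When the coarse
 * power gating workaround applies, the empty mask asks the GuC not to
 * hold forcewake on any domain.
 */
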
static int intel_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
                return -ENODEV;

        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
                return -ENODEV;

        DRM_DEBUG_KMS("Suspending device\n");

        /*
         * We could deadlock here in case another thread holding struct_mutex
         * calls RPM suspend concurrently, since the RPM suspend will wait
         * first for this RPM suspend to finish. In this case the concurrent
         * RPM resume will be followed by its RPM suspend counterpart. Still
         * for consistency return -EAGAIN, which will reschedule this suspend.
         */
        if (!mutex_trylock(&dev->struct_mutex)) {
                DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
                /*
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
                pm_runtime_mark_last_busy(device);

                return -EAGAIN;
        }

        disable_rpm_wakeref_asserts(dev_priv);

        /*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

        intel_guc_suspend(dev);

        intel_suspend_gt_powersave(dev);
        intel_runtime_pm_disable_interrupts(dev_priv);

        ret = intel_suspend_complete(dev_priv);
        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_enable_interrupts(dev_priv);

                enable_rpm_wakeref_asserts(dev_priv);

                return ret;
        }

        intel_uncore_forcewake_reset(dev, false);

        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

        if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
                DRM_ERROR("Unclaimed access detected prior to suspending\n");

        dev_priv->pm.suspended = true;

        /*
         * FIXME: We really should find a document that references the
         * arguments used below!
         */
        if (IS_BROADWELL(dev)) {
                /*
                 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
                 * being detected, and the call we do at intel_runtime_resume()
                 * won't be able to restore them. Since PCI_D3hot matches the
                 * actual specification and appears to be working, use it.
                 */
                intel_opregion_notify_adapter(dev, PCI_D3hot);
        } else {
                /*
                 * current versions of firmware which depend on this opregion
                 * notification have repurposed the D1 definition to mean
                 * "runtime suspended" vs. what you would normally expect (D3)
                 * to distinguish it from notifications that might be sent via
                 * the suspend path.
                 */
                intel_opregion_notify_adapter(dev, PCI_D1);
        }

        assert_forcewakes_inactive(dev_priv);

        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
}

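/*
 * Sketch of the caller side, assuming nothing beyond the standard runtime-PM
 * helpers: the -EAGAIN return above only leads to a retry because something
 * restarts the autosuspend clock. A wakeref holder typically drops its
 * reference as below (the helper name is hypothetical), which is what the
 * pm_runtime_mark_last_busy() call in the contention path pairs with:
 */
#include <linux/pm_runtime.h>

static void example_put_wakeref(struct device *device)
{
        pm_runtime_mark_last_busy(device);      /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(device);     /* may queue intel_runtime_suspend() */
}
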
static ssize_t show_rc6_mask(struct device *kdev,
                             struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);

        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t show_rc6_mask(struct device *kdev,
                             struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

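/*
 * A minimal sketch of how a show-only callback like the one above is wired
 * into sysfs with the standard DEVICE_ATTR()/device_create_file() helpers.
 * The attribute name matches i915's rc6_enable file, but take the wiring as
 * illustrative (i915 actually registers it as part of an attribute group),
 * and example_add_rc6_attr() is a hypothetical helper:
 */
#include <linux/device.h>

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);

static int example_add_rc6_attr(struct device *kdev)
{
        return device_create_file(kdev, &dev_attr_rc6_enable);
}
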
static int intel_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
                return -ENODEV;

        WARN_ON(!HAS_RUNTIME_PM(dev));
        assert_force_wake_inactive(dev_priv);

        DRM_DEBUG_KMS("Suspending device\n");

        /*
         * We could deadlock here in case another thread holding struct_mutex
         * calls RPM suspend concurrently, since the RPM suspend will wait
         * first for this RPM suspend to finish. In this case the concurrent
         * RPM resume will be followed by its RPM suspend counterpart. Still
         * for consistency return -EAGAIN, which will reschedule this suspend.
         */
        if (!mutex_trylock(&dev->struct_mutex)) {
                DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
                /*
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
                pm_runtime_mark_last_busy(device);

                return -EAGAIN;
        }

        /*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        /*
         * rps.work can't be rearmed here, since we get here only after making
         * sure the GPU is idle and the RPS freq is set to the minimum. See
         * intel_mark_idle().
         */
        cancel_work_sync(&dev_priv->rps.work);
        intel_runtime_pm_disable_interrupts(dev);

        if (IS_GEN6(dev)) {
                ret = 0;
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = hsw_runtime_suspend(dev_priv);
        } else if (IS_VALLEYVIEW(dev)) {
                ret = vlv_runtime_suspend(dev_priv);
        } else {
                ret = -ENODEV;
                WARN_ON(1);
        }

        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_restore_interrupts(dev);

                return ret;
        }

        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        dev_priv->pm.suspended = true;

        /*
         * current versions of firmware which depend on this opregion
         * notification have repurposed the D1 definition to mean
         * "runtime suspended" vs. what you would normally expect (D3)
         * to distinguish it from notifications that might be sent
         * via the suspend path.
         */
        intel_opregion_notify_adapter(dev, PCI_D1);

        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
}

static ssize_t show_rc6_mask(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);

        return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}