void intel_fbc_flush(struct drm_i915_private *dev_priv,
                     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!fbc_supported(dev_priv))
                return;

        if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;

        mutex_lock(&fbc->lock);

        fbc->busy_bits &= ~frontbuffer_bits;

        if (!fbc->busy_bits && fbc->enabled &&
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
                        intel_fbc_recompress(dev_priv);
                else
                        __intel_fbc_post_update(fbc->crtc);
        }

        mutex_unlock(&fbc->lock);
}
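/*
 * Illustrative sketch only (not i915 code): the frontbuffer-tracking
 * protocol that intel_fbc_flush() above participates in. Writers mark
 * frontbuffer bits busy when they start rendering (the invalidate half
 * is paraphrased from the driver's invalidate path and simplified here);
 * a later flush clears those bits, and once none remain busy the
 * compressor may run again. The types and helpers below are made-up
 * stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_fbc {
        unsigned int busy_bits; /* frontbuffer bits with writes in flight */
        bool active;            /* compressor currently running */
};

static void fake_invalidate(struct fake_fbc *fbc, unsigned int bits)
{
        fbc->busy_bits |= bits; /* someone is about to write the fb */
        fbc->active = false;    /* stop compressing while it is busy */
}

static void fake_flush(struct fake_fbc *fbc, unsigned int bits)
{
        fbc->busy_bits &= ~bits;        /* the writes have landed */
        if (!fbc->busy_bits)
                fbc->active = true;     /* safe to (re)compress */
}

int main(void)
{
        struct fake_fbc fbc = { .busy_bits = 0, .active = true };

        fake_invalidate(&fbc, 0x1);
        fake_flush(&fbc, 0x1);
        printf("active after flush: %d\n", fbc.active); /* prints 1 */
        return 0;
}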
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dev_priv->fbc.active = true;

        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
                dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

        if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                           I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

        intel_fbc_recompress(dev_priv);
}
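/*
 * Standalone sketch (assumptions flagged): how the compression-limit
 * bits are derived from fbc.threshold and the framebuffer's bytes per
 * pixel, mirroring the switch in gen7_fbc_activate() above. The
 * DPFC_CTL_LIMIT_* values and the fbc_limit_bits() helper are
 * illustrative placeholders, not the real i915_reg.h definitions.
 */
#include <stdio.h>

#define DPFC_CTL_LIMIT_1X (0u << 6)     /* placeholder bit encodings */
#define DPFC_CTL_LIMIT_2X (1u << 6)
#define DPFC_CTL_LIMIT_4X (2u << 6)

static unsigned int fbc_limit_bits(int threshold, int cpp)
{
        /* The driver bumps the threshold for 16bpp (cpp == 2) framebuffers. */
        if (cpp == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                return DPFC_CTL_LIMIT_4X;
        case 2:
                return DPFC_CTL_LIMIT_2X;
        default:
                return DPFC_CTL_LIMIT_1X;
        }
}

int main(void)
{
        /* threshold 1 on a 16bpp fb becomes 2, i.e. the 2X limit (0x40) */
        printf("%#x\n", fbc_limit_bits(1, 2));
        return 0;
}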
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
        if (params->fb.format->cpp[0] == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
                        dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
                                   SNB_CPU_FENCE_ENABLE |
                                   params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
        } else {
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA, 0);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
                }
        }

        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        I915_WRITE(ILK_FBC_RT_BASE,
                   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        intel_fbc_recompress(dev_priv);
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        dev_priv->fbc.active = true;

        dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
        if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev_priv))
                dpfc_ctl |= params->fb.fence_reg;

        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        }

        intel_fbc_recompress(dev_priv);
}
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
        struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;

        /* Display WA #0529: skl, kbl, bxt. */
        if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
                u32 val = I915_READ(CHICKEN_MISC_4);

                val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

                if (i915_gem_object_get_tiling(params->vma->obj) !=
                    I915_TILING_X)
                        val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

                I915_WRITE(CHICKEN_MISC_4, val);
        }

        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
                dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

        if (params->fb.format->cpp[0] == 2)
                threshold++;

        switch (threshold) {
        case 4:
        case 3:
                dpfc_ctl |= DPFC_CTL_LIMIT_4X;
                break;
        case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
                break;
        case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
                break;
        }

        if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE |
                           params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA, 0);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
        }

        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;

        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                           I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                           HSW_FBCQ_DIS);
        }

        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        intel_fbc_recompress(dev_priv);
}