/** * intel_fbc_choose_crtc - select a CRTC to enable FBC on * @dev_priv: i915 device instance * @state: the atomic state structure * * This function looks at the proposed state for CRTCs and planes, then chooses * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to * true. * * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. */ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) { struct intel_fbc *fbc = &dev_priv->fbc; struct intel_plane *plane; struct intel_plane_state *plane_state; bool crtc_chosen = false; int i; mutex_lock(&fbc->lock); /* Does this atomic commit involve the CRTC currently tied to FBC? */ if (fbc->crtc && !intel_atomic_get_new_crtc_state(state, fbc->crtc)) goto out; if (!intel_fbc_can_enable(dev_priv)) goto out; /* Simply choose the first CRTC that is compatible and has a visible * plane. We could go for fancier schemes such as checking the plane * size, but this would just affect the few platforms that don't tie FBC * to pipe or plane A. */ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct intel_crtc_state *crtc_state; struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc); if (!plane_state->base.visible) continue; if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) continue; if (fbc_on_plane_a_only(dev_priv) && plane->i9xx_plane != PLANE_A) continue; crtc_state = intel_atomic_get_new_crtc_state(state, crtc); crtc_state->enable_fbc = true; crtc_chosen = true; break; } if (!crtc_chosen) fbc->no_fbc_reason = "no suitable CRTC for FBC"; out: mutex_unlock(&fbc->lock); }
/*
 * Commit one plane as part of an atomic update: program it with its new state
 * when visible, otherwise disable it.
 *
 * NOTE(review): this function hands ->disable_plane() an intel_crtc, while
 * skl_update_planes_on_crtc()/i9xx_update_planes_on_crtc() elsewhere in this
 * file pass a crtc_state — these appear to come from different driver
 * revisions; confirm the current vfunc signature before touching either.
 */
static void intel_plane_atomic_update(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, intel_plane);
	/* Prefer the new CRTC; fall back to the old one when the new state
	 * has no CRTC (plane being disabled). */
	struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;

	if (new_plane_state->base.visible) {
		const struct intel_crtc_state *new_crtc_state =
			intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));

		trace_intel_update_plane(plane, to_intel_crtc(crtc));

		intel_plane->update_plane(intel_plane,
					  new_crtc_state, new_plane_state);
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}
}
/*
 * Commit all updated planes on @crtc, sequenced so that a plane is only
 * programmed once its new DDB allocation no longer overlaps any allocation
 * still live in the hardware (tracked in entries_y/entries_uv and advanced
 * by skl_next_plane_to_commit() as each plane is committed).
 */
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry entries_y[I915_MAX_PLANES];
	struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	/* Seed the live-allocation tracking with the old (still programmed)
	 * DDB entries; committed planes overwrite their slot as they go. */
	memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));
	memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_uv));

	while ((plane = skl_next_plane_to_commit(state, crtc,
						 entries_y, entries_uv,
						 &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		if (new_plane_state->base.visible) {
			trace_intel_update_plane(&plane->base, crtc);
			plane->update_plane(plane, new_crtc_state, new_plane_state);
		} else if (new_plane_state->slave) {
			struct intel_plane *master =
				new_plane_state->linked_plane;

			/*
			 * We update the slave plane from this function because
			 * programming it from the master plane's update_plane
			 * callback runs into issues when the Y plane is
			 * reassigned, disabled or used by a different plane.
			 *
			 * The slave plane is updated with the master plane's
			 * plane_state.
			 */
			new_plane_state =
				intel_atomic_get_new_plane_state(state, master);

			trace_intel_update_plane(&plane->base, crtc);
			plane->update_slave(plane, new_crtc_state, new_plane_state);
		} else {
			trace_intel_disable_plane(&plane->base, crtc);
			plane->disable_plane(plane, new_crtc_state);
		}
	}
}
static struct intel_plane * skl_next_plane_to_commit(struct intel_atomic_state *state, struct intel_crtc *crtc, struct skl_ddb_entry entries_y[I915_MAX_PLANES], struct skl_ddb_entry entries_uv[I915_MAX_PLANES], unsigned int *update_mask) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_plane_state *plane_state; struct intel_plane *plane; int i; if (*update_mask == 0) return NULL; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { enum plane_id plane_id = plane->id; if (crtc->pipe != plane->pipe || !(*update_mask & BIT(plane_id))) continue; if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id], entries_y, I915_MAX_PLANES, plane_id) || skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id], entries_uv, I915_MAX_PLANES, plane_id)) continue; *update_mask &= ~BIT(plane_id); entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id]; entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id]; return plane; } /* should never happen */ WARN_ON(1); return NULL; }
/*
 * Commit all updated planes on a pre-skl pipe. There is no DDB allocation
 * to sequence against here, so planes are simply walked in state order and
 * either programmed or disabled according to their new visibility.
 */
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		/* Skip planes on other pipes or not flagged for update. */
		if (plane->pipe != crtc->pipe)
			continue;
		if (!(update_mask & BIT(plane->id)))
			continue;

		if (new_plane_state->base.visible)
			intel_update_plane(plane, new_crtc_state, new_plane_state);
		else
			intel_disable_plane(plane, new_crtc_state);
	}
}