static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct omap_drm_private *priv = dev->dev_private;

	priv->dispc_ops->runtime_get();

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	if (priv->omaprev != 0x3430) {
		/* With the current dss dispc implementation we have to enable
		 * the new modeset before we can commit planes. The dispc ovl
		 * configuration relies on the video mode configuration being
		 * written into the HW when the ovl configuration is
		 * calculated.
		 *
		 * This approach is not ideal because after a mode change the
		 * plane update is executed only after the first vblank
		 * interrupt. The dispc implementation should be fixed so that
		 * it is able to use uncommitted drm state information.
		 */
		drm_atomic_helper_commit_modeset_enables(dev, old_state);
		omap_atomic_wait_for_completion(dev, old_state);

		drm_atomic_helper_commit_planes(dev, old_state, 0);

		drm_atomic_helper_commit_hw_done(old_state);
	} else {
		/*
		 * OMAP3 DSS seems to have issues with the work-around above,
		 * resulting in endless sync lost errors if a crtc is enabled
		 * without a plane. For now, skip the WA for OMAP3.
		 */
		drm_atomic_helper_commit_planes(dev, old_state, 0);

		drm_atomic_helper_commit_modeset_enables(dev, old_state);

		drm_atomic_helper_commit_hw_done(old_state);
	}

	/*
	 * Wait for completion of the page flips to ensure that old buffers
	 * can't be touched by the hardware anymore before cleaning up planes.
	 */
	omap_atomic_wait_for_completion(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	priv->dispc_ops->runtime_put();
}
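/*
 * For contrast with the OMAP-specific ordering above: the stock
 * drm_atomic_helper_commit_tail() used when a driver does not override
 * .atomic_commit_tail programs planes between the modeset disables and
 * enables. The sketch below only illustrates that documented ordering;
 * the function name is hypothetical and the exact body of the core helper
 * differs between kernel versions (drm_atomic_helper_fake_vblank(), for
 * example, is a later addition).
 */
static void example_default_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}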
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (kms->funcs->commit) {
		DRM_DEBUG_ATOMIC("triggering commit\n");
		kms->funcs->commit(kms, state);
	}

	msm_atomic_wait_for_commit_done(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);
}
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;

	if (malidp->crtc.enabled) {
		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm))
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
	}

	event = malidp->crtc.state->event;
	if (event) {
		malidp->crtc.state->event = NULL;

		spin_lock_irq(&drm->event_lock);
		if (drm_crtc_vblank_get(&malidp->crtc) == 0)
			drm_crtc_arm_vblank_event(&malidp->crtc, event);
		else
			drm_crtc_send_vblank_event(&malidp->crtc, event);
		spin_unlock_irq(&drm->event_lock);
	}

	drm_atomic_helper_commit_hw_done(state);
}
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;

	malidp->event = malidp->crtc.state->event;
	malidp->crtc.state->event = NULL;

	if (malidp->crtc.state->active) {
		/*
		 * If we have an event to deliver to userspace, make sure
		 * the vblank is enabled as we are sending it from the IRQ
		 * handler.
		 */
		if (malidp->event)
			drm_crtc_vblank_get(&malidp->crtc);

		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm) < 0)
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
	} else if (malidp->event) {
		/* CRTC inactive means vblank IRQ is disabled, send event directly */
		spin_lock_irq(&drm->event_lock);
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		spin_unlock_irq(&drm->event_lock);
	}

	drm_atomic_helper_commit_hw_done(state);
}
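/*
 * Illustrative sketch only, not the driver's actual interrupt handler: once
 * malidp->event has been stashed by the commit_hw_done hook above and a
 * vblank reference has been taken, a vblank/flip-done interrupt path could
 * complete the flip roughly as below. The function name and the assumption
 * that it runs once per vblank are hypothetical.
 */
static void example_malidp_vblank_irq(struct malidp_drm *malidp,
				      struct drm_device *drm)
{
	unsigned long flags;

	drm_crtc_handle_vblank(&malidp->crtc);

	spin_lock_irqsave(&drm->event_lock, flags);
	if (malidp->event) {
		/* Deliver the pending event and drop the vblank reference
		 * taken in malidp_atomic_commit_hw_done(). */
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		drm_crtc_vblank_put(&malidp->crtc);
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);
}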
static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}
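/*
 * The non-hub path above delegates to drm_atomic_helper_commit_tail_rpm(),
 * which commits the modeset enables before the planes so that runtime-PM
 * managed hardware is powered up by the CRTC enable before plane programming
 * touches it. The sketch below only illustrates that ordering; the function
 * name is hypothetical and the exact body of the core helper varies between
 * kernel versions.
 */
static void example_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}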
static void rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
}
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	bool plane_disabling = false;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state,
				DRM_PLANE_COMMIT_ACTIVE_ONLY |
				DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		if (drm_atomic_plane_disabling(old_plane_state,
					       new_plane_state))
			plane_disabling = true;
	}

	/*
	 * The flip done wait is only strictly required by imx-drm if a
	 * deferred plane disable is in-flight. As the core requires blocking
	 * commits to wait for the flip, it is done here unconditionally.
	 * This keeps the work item around a bit longer than required for the
	 * majority of non-blocking commits, but we accept that for the sake
	 * of simplicity.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	if (plane_disabling) {
		for_each_old_plane_in_state(state, plane, old_plane_state, i)
			ipu_plane_disable_deferred(plane);
	}

	drm_atomic_helper_commit_hw_done(state);
}
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	msm_atomic_wait_for_commit_done(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);
}