/*
 * Commit tail for omapdrm: push the validated atomic state to the DSS
 * hardware while holding a dispc runtime-PM reference for the whole
 * sequence.  OMAP3 (0x3430) uses a different plane/modeset ordering
 * than later revisions (see the comments on each branch).
 */
static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct omap_drm_private *priv = dev->dev_private;

	/* Keep dispc powered until the commit is fully done. */
	priv->dispc_ops->runtime_get();

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	if (priv->omaprev != 0x3430) {
		/* With the current dss dispc implementation we have to enable
		 * the new modeset before we can commit planes. The dispc ovl
		 * configuration relies on the video mode configuration been
		 * written into the HW when the ovl configuration is
		 * calculated.
		 *
		 * This approach is not ideal because after a mode change the
		 * plane update is executed only after the first vblank
		 * interrupt. The dispc implementation should be fixed so that
		 * it is able use uncommitted drm state information.
		 */
		drm_atomic_helper_commit_modeset_enables(dev, old_state);
		omap_atomic_wait_for_completion(dev, old_state);

		drm_atomic_helper_commit_planes(dev, old_state, 0);

		drm_atomic_helper_commit_hw_done(old_state);
	} else {
		/*
		 * OMAP3 DSS seems to have issues with the work-around above,
		 * resulting in endless sync losts if a crtc is enabled without
		 * a plane. For now, skip the WA for OMAP3.
		 */
		drm_atomic_helper_commit_planes(dev, old_state, 0);

		drm_atomic_helper_commit_modeset_enables(dev, old_state);

		drm_atomic_helper_commit_hw_done(old_state);
	}

	/*
	 * Wait for completion of the page flips to ensure that old buffers
	 * can't be touched by the hardware anymore before cleaning up planes.
	 */
	omap_atomic_wait_for_completion(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	priv->dispc_ops->runtime_put();
}
/*
 * Tail of an msm atomic commit: program the validated state through the
 * generic helpers, with kms backend hooks bracketing the HW access.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct msm_drm_private *msm_priv = drm->dev_private;
	struct msm_kms *kms = msm_priv->kms;

	/* Let the kms backend set up before the helpers touch the HW. */
	kms->funcs->prepare_commit(kms, state);

	/* Standard helper ordering: disables, planes, enables. */
	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, 0);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	/* Optional backend hook to kick off the queued HW programming. */
	if (kms->funcs->commit) {
		DRM_DEBUG_ATOMIC("triggering commit\n");
		kms->funcs->commit(kms, state);
	}

	msm_atomic_wait_for_commit_done(drm, state);

	kms->funcs->complete_commit(kms, state);

	/* Signal completion, then release the old plane resources. */
	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(drm, state);
}
/*
 * Finish an atomic commit: apply the new state, wait for it to take
 * effect, free old resources, and wake up anyone blocked on this commit.
 */
static void atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* Let the update latch before releasing the old buffers. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&dc->commit.wait.lock);
	dc->commit.pending = false;
	wake_up_all_locked(&dc->commit.wait);
	spin_unlock(&dc->commit.wait.lock);

	kfree(commit);
}
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

	/* Apply the update: disables, planes, then enables. */
	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	/* Release the CRTCs this commit claimed, then free the commit. */
	end_atomic(dev->dev_private, c->crtc_mask);

	kfree(c);
}
/* Commit tail for virtio-gpu: modeset first, plane updates afterwards. */
static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;

	/* Disables, then enables, then the plane updates. */
	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, 0);

	/* Signal that the HW programming is done. */
	drm_atomic_helper_commit_hw_done(state);

	/* Let the flips land before releasing the old framebuffers. */
	drm_atomic_helper_wait_for_vblanks(drm, state);
	drm_atomic_helper_cleanup_planes(drm, state);
}
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	int ret;
	int i;

	/* Async commits are not implemented; reject them up front. */
	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Point of no return */

	/*
	 * FIXME: The proper sequence here will eventually be:
	 *
	 * drm_atomic_helper_swap_state(dev, state)
	 * drm_atomic_helper_commit_pre_planes(dev, state);
	 * drm_atomic_helper_commit_planes(dev, state);
	 * drm_atomic_helper_commit_post_planes(dev, state);
	 * drm_atomic_helper_wait_for_vblanks(dev, state);
	 * drm_atomic_helper_cleanup_planes(dev, state);
	 * drm_atomic_state_free(state);
	 *
	 * once we have full atomic modeset. For now, just manually update
	 * plane states to avoid clobbering good states with dummy states
	 * while nuclear pageflipping.
	 */
	for (i = 0; i < dev->mode_config.num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i];

		if (!plane)
			continue;

		/* Swap the new plane state into place; the outgoing state
		 * ends up in @state and is freed with it below.  The
		 * back-pointer is set around the swap and cleared again so
		 * the live state never points at a drm_atomic_state. */
		plane->state->state = state;
		swap(state->plane_states[i], plane->state);
		plane->state->state = NULL;
	}
	drm_atomic_helper_commit_planes(dev, state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
/* Commit tail for Mali-DP: modeset before planes. */
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE(review): third argument 'true' — presumably the helper's
	 * active-only flag; confirm against the helper version in use. */
	drm_atomic_helper_commit_planes(dev, state, true);

	/* Driver hook used in place of the generic commit_hw_done(). */
	malidp_atomic_commit_hw_done(state);

	/* Wait out the flips before releasing old plane resources. */
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}
/* Commit tail for komeda. */
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;

	/* Standard ordering: disables, plane updates, enables. */
	drm_atomic_helper_commit_modeset_disables(drm, old_state);
	drm_atomic_helper_commit_planes(drm, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(drm, old_state);

	/* Wait for the flips to land before signalling hw_done. */
	drm_atomic_helper_wait_for_flip_done(drm, old_state);
	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(drm, old_state);
}
/*
 * Commit tail for Tegra: devices with a display hub need a custom
 * sequence; everything else uses the generic runtime-PM tail.
 */
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (!tegra->hub) {
		/* No display hub: the stock helper tail is sufficient. */
		drm_atomic_helper_commit_tail_rpm(old_state);
		return;
	}

	drm_atomic_helper_commit_modeset_disables(drm, old_state);
	/* Hub-specific programming happens between disables and planes. */
	tegra_display_hub_atomic_commit(drm, old_state);
	drm_atomic_helper_commit_planes(drm, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(drm, old_state);
	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(drm, old_state);
	drm_atomic_helper_cleanup_planes(drm, old_state);
}
/* Commit tail for Rockchip: generic helper sequence, active planes only. */
static void rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	/* Signal completion before waiting out the flips. */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(drm, state);
	drm_atomic_helper_cleanup_planes(drm, state);
}
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* Block until all fb fences have signalled before touching the HW. */
	drm_atomic_helper_wait_for_fences(dev, state, false);

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	/* Drop the state reference held for this commit, then tear down
	 * the commit object itself. */
	drm_atomic_state_put(state);

	commit_destroy(c);
}
/*
 * Commit tail for imx-drm.  Plane disables that follow a modeset are
 * deferred by the helper flags and executed explicitly after the flip
 * has completed.
 */
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	bool plane_disabling = false;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	/* Only commit active planes, and keep the helpers from disabling
	 * planes right after a modeset — that is handled below. */
	drm_atomic_helper_commit_planes(dev, state,
				DRM_PLANE_COMMIT_ACTIVE_ONLY |
				DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Record whether any plane is being disabled by this commit. */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
			plane_disabling = true;
	}

	/*
	 * The flip done wait is only strictly required by imx-drm if a deferred
	 * plane disable is in-flight. As the core requires blocking commits
	 * to wait for the flip it is done here unconditionally. This keeps the
	 * workitem around a bit longer than required for the majority of
	 * non-blocking commits, but we accept that for the sake of simplicity.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	if (plane_disabling) {
		/* Perform the plane disables that were deferred above. */
		for_each_old_plane_in_state(state, plane, old_plane_state, i)
			ipu_plane_disable_deferred(plane);
	}

	drm_atomic_helper_commit_hw_done(state);
}
/*
 * Commit tail for msm: program the validated state via the generic
 * helpers, bracketed by the kms backend's prepare/complete hooks.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	msm_atomic_wait_for_commit_done(dev, state);

	kms->funcs->complete_commit(kms, state);

	/* NOTE(review): commit_hw_done() is signalled only after the vblank
	 * wait here, whereas the other drivers in this file signal it before
	 * waiting — confirm this ordering is intentional. */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(dev, state);
}