/*
 * Commit-tail for msm: program the swapped-in atomic state into the
 * hardware in the canonical helper order, kick the KMS flush hook if
 * one is provided, and release the outgoing framebuffers once the
 * update has landed.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *ddev = state->dev;
	struct msm_drm_private *msm_priv = ddev->dev_private;
	struct msm_kms *kms = msm_priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(ddev, state);
	drm_atomic_helper_commit_planes(ddev, state, 0);
	drm_atomic_helper_commit_modeset_enables(ddev, state);

	/* Optional KMS-level flush point for the whole update. */
	if (kms->funcs->commit) {
		DRM_DEBUG_ATOMIC("triggering commit\n");
		kms->funcs->commit(kms, state);
	}

	msm_atomic_wait_for_commit_done(ddev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(ddev, state);
}
/*
 * Apply a swapped-in atomic state to the hardware and tear the commit
 * object down.  Runs either synchronously or from the commit workqueue;
 * at this point nothing may fail.  Takes ownership of @commit and of
 * the state it carries (both are freed here).
 */
static void atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/*
	 * Wait for the update to latch so the hardware can no longer
	 * touch the old framebuffers before they are cleaned up.
	 */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&dc->commit.wait.lock);
	dc->commit.pending = false;
	/* Waiters sleep inside the waitqueue lock; wake them locked. */
	wake_up_all_locked(&dc->commit.wait);
	spin_unlock(&dc->commit.wait.lock);

	kfree(commit);
}
/* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. */ static void complete_commit(struct msm_commit *c) { struct drm_atomic_state *state = c->state; struct drm_device *dev = state->dev; drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); /* NOTE: _wait_for_vblanks() only waits for vblank on * enabled CRTCs. So we end up faulting when disabling * due to (potentially) unref'ing the outgoing fb's * before the vblank when the disable has latched. * * But if it did wait on disabled (or newly disabled) * CRTCs, that would be racy (ie. we could have missed * the irq. We need some way to poll for pipe shut * down. Or just live with occasionally hitting the * timeout in the CRTC disable path (which really should * not be critical path) */ drm_atomic_helper_wait_for_vblanks(dev, state); drm_atomic_helper_cleanup_planes(dev, state); drm_atomic_state_free(state); end_atomic(dev->dev_private, c->crtc_mask); kfree(c); }
/*
 * virtio-gpu commit-tail.  Note that modeset enables are committed
 * before the plane updates here, and hw_done is signalled before the
 * vblank wait — both orderings are preserved from the original.
 */
static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, 0);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(drm, state);
	drm_atomic_helper_cleanup_planes(drm, state);
}
/*
 * omapdrm commit-tail.  Holds a dispc runtime-PM reference across the
 * whole hardware update.  The enable/plane ordering differs per SoC
 * generation (see the branch comments below).
 */
static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *ddev = old_state->dev;
	struct omap_drm_private *omap = ddev->dev_private;

	omap->dispc_ops->runtime_get();

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(ddev, old_state);

	if (omap->omaprev != 0x3430) {
		/*
		 * With the current dss dispc implementation the new mode
		 * must be enabled before planes can be committed: the ovl
		 * configuration relies on the video mode already having
		 * been written to the HW when it is calculated.
		 *
		 * Not ideal — after a mode change the plane update only
		 * executes after the first vblank interrupt.  dispc should
		 * be fixed to consume uncommitted drm state instead.
		 */
		drm_atomic_helper_commit_modeset_enables(ddev, old_state);
		omap_atomic_wait_for_completion(ddev, old_state);

		drm_atomic_helper_commit_planes(ddev, old_state, 0);

		drm_atomic_helper_commit_hw_done(old_state);
	} else {
		/*
		 * OMAP3 DSS misbehaves with the work-around above —
		 * endless sync losts when a crtc is enabled without a
		 * plane — so skip the WA on OMAP3 for now.
		 */
		drm_atomic_helper_commit_planes(ddev, old_state, 0);

		drm_atomic_helper_commit_modeset_enables(ddev, old_state);

		drm_atomic_helper_commit_hw_done(old_state);
	}

	/*
	 * Wait for the page flips to finish so the hardware can no longer
	 * touch the old buffers before the planes are cleaned up.
	 */
	omap_atomic_wait_for_completion(ddev, old_state);

	drm_atomic_helper_cleanup_planes(ddev, old_state);

	omap->dispc_ops->runtime_put();
}
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	int ret;
	int i;

	/* Async commits are not supported yet — reject them up front. */
	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Point of no return */

	/*
	 * FIXME:  The proper sequence here will eventually be:
	 *
	 * drm_atomic_helper_swap_state(dev, state)
	 * drm_atomic_helper_commit_pre_planes(dev, state);
	 * drm_atomic_helper_commit_planes(dev, state);
	 * drm_atomic_helper_commit_post_planes(dev, state);
	 * drm_atomic_helper_wait_for_vblanks(dev, state);
	 * drm_atomic_helper_cleanup_planes(dev, state);
	 * drm_atomic_state_free(state);
	 *
	 * once we have full atomic modeset.  For now, just manually update
	 * plane states to avoid clobbering good states with dummy states
	 * while nuclear pageflipping.
	 */
	for (i = 0; i < dev->mode_config.num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i];

		if (!plane)
			continue;

		/*
		 * Manually swap the new plane state in: temporarily point
		 * the current state back at @state so the swapped-out old
		 * state lands in the right slot, then detach it again.
		 */
		plane->state->state = state;
		swap(state->plane_states[i], plane->state);
		plane->state->state = NULL;
	}
	drm_atomic_helper_commit_planes(dev, state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
static void malidp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; drm_atomic_helper_commit_modeset_disables(drm, state); drm_atomic_helper_commit_modeset_enables(drm, state); drm_atomic_helper_commit_planes(drm, state, true); malidp_atomic_commit_hw_done(state); drm_atomic_helper_wait_for_vblanks(drm, state); drm_atomic_helper_cleanup_planes(drm, state); }
/*
 * Commit entry point for atmel-hlcdc.  Serializes commits through the
 * dc->commit.pending flag (only one commit in flight at a time), swaps
 * the state in, and then either queues the hardware update on the
 * driver workqueue (@async) or applies it synchronously.
 *
 * Returns 0 on success or a negative errno; on failure the prepared
 * planes are cleaned up and @state is left unconsumed.
 */
static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
					struct drm_atomic_state *state,
					bool async)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct atmel_hlcdc_dc_commit *commit;
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Allocate the commit object. */
	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit) {
		ret = -ENOMEM;
		goto error;
	}

	INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/*
	 * Wait (interruptibly, under the waitqueue lock) until no other
	 * commit is pending, then mark ourselves as the pending commit.
	 */
	spin_lock(&dc->commit.wait.lock);
	ret = wait_event_interruptible_locked(dc->commit.wait,
					      !dc->commit.pending);
	if (ret == 0)
		dc->commit.pending = true;
	spin_unlock(&dc->commit.wait.lock);

	/* Interrupted by a signal — back out before the state swap. */
	if (ret) {
		kfree(commit);
		goto error;
	}

	/* Swap the state, this is the point of no return. */
	drm_atomic_helper_swap_state(state, true);

	if (async)
		queue_work(dc->wq, &commit->work);
	else
		atmel_hlcdc_dc_atomic_complete(commit);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
/*
 * komeda commit-tail.  The flip-done wait is deliberately performed
 * before hw_done is signalled (preserved from the original ordering).
 */
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(drm, old_state);
	drm_atomic_helper_commit_planes(drm, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(drm, old_state);

	drm_atomic_helper_wait_for_flip_done(drm, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(drm, old_state);
}
/*
 * Tegra commit-tail.  Devices with a display hub need the hub state
 * committed between the CRTC disables and the plane updates; everything
 * else falls through to the runtime-PM-aware generic helper.
 */
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct tegra_drm *tegra = dev->dev_private;

	if (!tegra->hub) {
		drm_atomic_helper_commit_tail_rpm(old_state);
		return;
	}

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	tegra_display_hub_atomic_commit(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
/*
 * Rockchip commit-tail: standard helper ordering, committing planes
 * only on active CRTCs.
 */
static void rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(drm, state);
	drm_atomic_helper_cleanup_planes(drm, state);
}
/*
 * The (potentially) asynchronous tail of the commit.  Past this point
 * failure is not an option.  Drops the state reference and destroys the
 * commit object when done.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	struct drm_atomic_state *old_state = c->state;
	struct drm_device *ddev = old_state->dev;
	struct msm_drm_private *msm_priv = ddev->dev_private;
	struct msm_kms *kms = msm_priv->kms;

	/* Block until all fb fences have signalled before touching HW. */
	drm_atomic_helper_wait_for_fences(ddev, old_state, false);

	kms->funcs->prepare_commit(kms, old_state);

	drm_atomic_helper_commit_modeset_disables(ddev, old_state);
	drm_atomic_helper_commit_planes(ddev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(ddev, old_state);

	/*
	 * _wait_for_vblanks() only blocks on CRTCs that remain enabled,
	 * so the outgoing fbs of a just-disabled CRTC may be unreferenced
	 * before the disable has actually latched, and we fault.
	 *
	 * Waiting on disabled (or newly-disabled) CRTCs would be racy —
	 * the irq may already be gone.  We would need a way to poll for
	 * pipe shutdown, or accept the occasional timeout in the CRTC
	 * disable path (which is not performance-critical anyway).
	 */
	msm_atomic_wait_for_commit_done(ddev, old_state);

	drm_atomic_helper_cleanup_planes(ddev, old_state);

	kms->funcs->complete_commit(kms, old_state);

	drm_atomic_state_put(old_state);

	commit_destroy(c);
}
/*
 * Commit-tail for msm (KMS-callback variant).  Note that the vblank
 * wait happens before hw_done is signalled here — ordering preserved
 * from the original.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct msm_drm_private *drm_priv = drm->dev_private;
	struct msm_kms *kms = drm_priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state, 0);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	msm_atomic_wait_for_commit_done(drm, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_cleanup_planes(drm, state);
}
/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		      struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		/*
		 * Only planes whose fb actually changes need a fence;
		 * grab the exclusive fence off the incoming buffer's
		 * reservation object.
		 */
		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_object *obj =
				msm_framebuffer_bo(new_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);

			new_state->fence =
				reservation_object_get_excl_rcu(msm_obj->resv);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one conditions: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (nonblock) {
		/* Hand the commit off to the driver's atomic workqueue. */
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c, false);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}