/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 *
 * Programs the hardware in the canonical atomic order (disables,
 * then plane updates, then enables), waits for the update to latch,
 * and releases the committed state.  The ordering of these helper
 * calls is load-bearing — do not reorder.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq.  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	drm_atomic_helper_wait_for_vblanks(dev, state);

	/* Drop the plane resources pinned for this commit, then free the
	 * state object itself — state must not be touched after this.
	 */
	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	/* Release the CRTCs this commit claimed (c->crtc_mask);
	 * presumably paired with a begin/start_atomic elsewhere in the
	 * driver — not visible from here, confirm against caller.
	 */
	end_atomic(dev->dev_private, c->crtc_mask);

	/* The commit struct is owned by this function once called. */
	kfree(c);
}
/* Tail end of an atomic commit for the atmel-hlcdc driver: program the
 * hardware, wait for the new state to latch, release the old state, and
 * signal completion to any thread blocked waiting on this commit.
 */
static void atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	/* Wait for the update to reach the hardware before dropping the
	 * framebuffer references still held by old_state.
	 */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	/* old_state must not be touched past this point. */
	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter.  The pending flag is
	 * protected by the waitqueue's own lock, so clearing it and
	 * waking are done under the same lock — a waiter checking
	 * dc->commit.pending cannot miss the wakeup.
	 */
	spin_lock(&dc->commit.wait.lock);
	dc->commit.pending = false;
	wake_up_all_locked(&dc->commit.wait);
	spin_unlock(&dc->commit.wait.lock);

	/* The commit struct is owned by this function once called. */
	kfree(commit);
}
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * Commits a top-level state object that has previously been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete.  At the
 * moment we can only handle plane-related operations and do not yet
 * support asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	int err;
	int idx;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	err = drm_atomic_helper_prepare_planes(dev, state);
	if (err)
		return err;

	/* Point of no return */

	/*
	 * FIXME: The proper sequence here will eventually be:
	 *
	 * drm_atomic_helper_swap_state(dev, state)
	 * drm_atomic_helper_commit_pre_planes(dev, state);
	 * drm_atomic_helper_commit_planes(dev, state);
	 * drm_atomic_helper_commit_post_planes(dev, state);
	 * drm_atomic_helper_wait_for_vblanks(dev, state);
	 * drm_atomic_helper_cleanup_planes(dev, state);
	 * drm_atomic_state_free(state);
	 *
	 * once we have full atomic modeset.  For now, just manually update
	 * plane states to avoid clobbering good states with dummy states
	 * while nuclear pageflipping.
	 */
	for (idx = 0; idx < dev->mode_config.num_total_plane; idx++) {
		struct drm_plane *p = state->planes[idx];

		if (!p)
			continue;

		/* Hand the outgoing plane state back to @state (so it is
		 * freed with it below) and detach the incoming one: set
		 * the backpointer before the swap, clear it after.
		 */
		p->state->state = state;
		swap(state->plane_states[idx], p->state);
		p->state->state = NULL;
	}

	drm_atomic_helper_commit_planes(dev, state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}