Example 1
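This first example is the DRM core's set_property_atomic() helper, which implements the legacy SETPROPERTY ioctl on top of the atomic API: allocate an atomic state, stage a single property change (with a special case for the connector DPMS property), commit it, and retry via the standard backoff dance whenever lock acquisition fails with -EDEADLK.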
static int set_property_atomic(struct drm_mode_object *obj,
			       struct drm_property *prop,
			       uint64_t prop_value)
{
	struct drm_device *dev = prop->dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Allocate the state first: the early -ENOMEM return below must
	 * not leave an initialized acquire context behind without a
	 * matching drm_modeset_acquire_fini().
	 */
	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);
	state->acquire_ctx = &ctx;
retry:
	if (prop == state->dev->mode_config.dpms_property) {
		if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
			ret = -EINVAL;
			goto out;
		}

		ret = drm_atomic_connector_commit_dpms(state,
						       obj_to_connector(obj),
						       prop_value);
	} else {
		ret = drm_atomic_set_property(state, obj, prop, prop_value);
		if (ret)
			goto out;
		ret = drm_atomic_commit(state);
	}
out:
	if (ret == -EDEADLK) {
		/*
		 * Lock contention: drop the staged state and every held
		 * lock, wait for the contended lock, then retry.
		 */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
Example 2
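The second example is an i915 helper that toggles a pipe A CRC workaround (forcing panel-fitter passthrough when the eDP transcoder is in use) by committing a one-off atomic state. It still relies on the legacy drm_modeset_lock_all() locking, so a failed commit is merely warned about; there is no -EDEADLK retry because all locks are taken up front.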
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
					bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto put_state;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);

put_state:
	drm_atomic_state_put(state);
unlock:
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	drm_modeset_unlock_all(dev);
}
Example 3
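The third example is userspace: a render loop in the style of kmscube that pairs atomic commits with explicit fencing, using the KMS out-fence to throttle GPU rendering and a GPU out-fence as the in-fence for the next flip. Note that drm_atomic_commit(fb->fb_id, flags) here is the program's own wrapper around the atomic ioctl, not the kernel function of the same name used in the other examples.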
static int atomic_run(const struct gbm *gbm, const struct egl *egl)
{
	struct gbm_bo *bo = NULL;
	struct drm_fb *fb;
	uint32_t i = 0;
	uint32_t flags = DRM_MODE_ATOMIC_NONBLOCK;
	struct timeval timeout;
	fd_set fds;
	int ret;

	if (egl_check(egl, eglDupNativeFenceFDANDROID) ||
	    egl_check(egl, eglCreateSyncKHR) ||
	    egl_check(egl, eglDestroySyncKHR) ||
	    egl_check(egl, eglWaitSyncKHR) ||
	    egl_check(egl, eglClientWaitSyncKHR))
		return -1;

	/* Allow a modeset change for the first commit only. */
	flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;

	while (1) {
		struct gbm_bo *next_bo;
		EGLSyncKHR gpu_fence = NULL;   /* out-fence from gpu, in-fence to kms */
		EGLSyncKHR kms_fence = NULL;   /* in-fence to gpu, out-fence from kms */

		if (drm.kms_out_fence_fd != -1) {
			kms_fence = create_fence(egl, drm.kms_out_fence_fd);
			assert(kms_fence);

			/* driver now has ownership of the fence fd: */
			drm.kms_out_fence_fd = -1;

			/* wait "on the gpu" (ie. this won't necessarily block, but
			 * will block the rendering until fence is signaled), until
			 * the previous pageflip completes so we don't render into
			 * the buffer that is still on screen.
			 */
			egl->eglWaitSyncKHR(egl->display, kms_fence, 0);
		}

		egl->draw(i++);

		/* Insert a fence into the command stream; it will be
		 * signaled when the GPU rendering is done.
		 */
		gpu_fence = create_fence(egl, EGL_NO_NATIVE_FENCE_FD_ANDROID);
		assert(gpu_fence);

		eglSwapBuffers(egl->display, egl->surface);

		/* After eglSwapBuffers(), gpu_fence should have been
		 * flushed, so it is safe to extract its fd:
		 */
		drm.kms_in_fence_fd = egl->eglDupNativeFenceFDANDROID(egl->display, gpu_fence);
		egl->eglDestroySyncKHR(egl->display, gpu_fence);
		assert(drm.kms_in_fence_fd != -1);

		next_bo = gbm_surface_lock_front_buffer(gbm->surface);
		if (!next_bo) {
			printf("Failed to lock frontbuffer\n");
			return -1;
		}
		fb = drm_fb_get_from_bo(next_bo);
		if (!fb) {
			printf("Failed to get a new framebuffer BO\n");
			return -1;
		}

		if (kms_fence) {
			EGLint status;

			/* Wait on the CPU side for the _previous_ commit to
			 * complete before we post the flip through KMS, as
			 * atomic will reject the commit if we post a new one
			 * whilst the previous one is still pending.
			 */
			do {
				status = egl->eglClientWaitSyncKHR(egl->display,
								   kms_fence,
								   0,
								   EGL_FOREVER_KHR);
			} while (status != EGL_CONDITION_SATISFIED_KHR);

			egl->eglDestroySyncKHR(egl->display, kms_fence);
		}

		/*
		 * Here you could also update drm plane layers if you want
		 * hw composition
		 */
		ret = drm_atomic_commit(fb->fb_id, flags);
		if (ret) {
			printf("failed to commit: %s\n", strerror(errno));
			return -1;
		}

		/* release last buffer to render on again: */
		if (bo)
			gbm_surface_release_buffer(gbm->surface, bo);
		bo = next_bo;

		/* The modeset was allowed for the first commit only; clear
		 * the flag for all subsequent commits. */
		flags &= ~(DRM_MODE_ATOMIC_ALLOW_MODESET);

		/* watch for user interruption */
		FD_ZERO(&fds);
		FD_SET(0, &fds);
		memset(&timeout, 0, sizeof(timeout));

		ret = select(1, &fds, NULL, NULL, &timeout);
		if (ret < 0) {
			printf("select() failed: %s\n", strerror(errno));
			break;
		}

		/*
		 * select() will immediately timeout if there was no user
		 * interrupt because of the 0 timeout. However, that's an
		 * expected situation, not an error, so we just ignore it
		 * here.
		 */
		if (FD_ISSET(0, &fds)) {
			printf("user interrupted!\n");
			break;
		}
	}

	return ret;
}
Example 4
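The fourth example is a later revision of Example 2: drm_modeset_lock_all() is replaced by a local acquire context with the full retry/backoff dance, and the workaround additionally force-disables IPS, whose dynamic transitions would make pipe CRCs unreliable.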
static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
			      bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto put_state;
	}

	if (HAS_IPS(dev_priv)) {
		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		pipe_config->ips_force_disable = enable;
		if (pipe_config->ips_enabled == enable)
			pipe_config->base.connectors_changed = true;
	}

	if (IS_HASWELL(dev_priv)) {
		pipe_config->pch_pfit.force_thru = enable;
		if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
		    pipe_config->pch_pfit.enabled != enable)
			pipe_config->base.connectors_changed = true;
	}

	ret = drm_atomic_commit(state);

put_state:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
unlock:
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
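Examples 1 and 4 share the same locking idiom. As a reference, here is a minimal sketch of that acquire-context/backoff pattern in isolation; my_update_state() is a hypothetical placeholder for whatever duplication and editing of object state the caller wants to do, not a real kernel function.

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

/* Hypothetical helper: duplicate and edit whatever object state the
 * caller cares about; stands in for the example-specific bodies above. */
static int my_update_state(struct drm_atomic_state *state);

static int commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);
	state->acquire_ctx = &ctx;
retry:
	ret = my_update_state(state);
	if (ret)
		goto out;

	ret = drm_atomic_commit(state);
out:
	if (ret == -EDEADLK) {
		/* A lock in the acquire class was contended: drop the
		 * staged state and all held locks, wait for the contended
		 * lock, then retry from the top. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}

Example 2 predates this pattern and avoids it by grabbing every modeset lock up front, which is simpler but serializes against all concurrent modesets; the acquire-context form only blocks on the locks it actually needs.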