Example no. 1
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}
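Example no. 1 only queues the previous cursor BO on unref_cursor_work; the actual release happens later, once the hardware is no longer scanning it out. A minimal sketch of what that flip-work callback could look like, assuming a helper named unref_cursor_worker and an msm_gem_put_iova() counterpart to the iova get calls shown here (neither appears in the examples):

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	/* drop the iova + object references taken when scanout of this BO started */
	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}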
Example no. 2
static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp4_crtc->fb;

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp4_crtc->base.primary->fb = new_fb;
	mdp4_crtc->fb = new_fb;

	if (old_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
}
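As with the cursor BO, the outgoing framebuffer is only queued here; the unreference itself is deferred to a flip-work callback. A minimal sketch of such a worker, assuming a name of unref_fb_worker (not shown in these examples):

static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	/* val is the old framebuffer queued by update_fb()/update_scanout() */
	drm_framebuffer_unreference(val);
}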
Example no. 3
/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
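Both the cursor update and the deferred unrefs are driven from vblank: request_pending() asks for a vblank interrupt, and the handler consumes the pending bits. A rough sketch of such a handler, assuming a pending bitmask in mdp4_crtc->pending and a driver workqueue at priv->wq (both assumptions, not shown above):

static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	/* consume whatever request_pending() accumulated since the last vblank */
	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP)
		drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}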
Example no. 4
/* unlike update_fb(), take a ref to the new scanout fb *before* updating
 * plane, then call this.  Needed to ensure we don't unref the buffer that
 * is actually still being scanned out.
 *
 * Note that this whole thing goes away with atomic, since we can defer
 * calling into the driver until rendering is done.
 */
static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	/* flush updates, to make sure hw is updated to new scanout fb,
	 * so that we can safely queue unref to current fb (ie. next
	 * vblank we know hw is done w/ previous scanout_fb).
	 */
	crtc_flush(crtc);

	if (mdp4_crtc->scanout_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
				mdp4_crtc->scanout_fb);

	mdp4_crtc->scanout_fb = fb;

	/* enable vblank to complete flip: */
	request_pending(crtc, PENDING_FLIP);
}
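A call site of update_scanout() is therefore expected to reference the new fb, reprogram the plane, and only then hand the fb over. A hypothetical caller illustrating that order (the plane helper name below is a stand-in, not one of the functions shown here):

static void swap_scanout_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	/* 1. take a ref on the new fb *before* the plane switches over */
	drm_framebuffer_reference(fb);

	/* 2. reprogram the primary plane to scan out of the new fb (stand-in helper) */
	mdp4_plane_set_scanout(crtc->primary, fb);

	/* 3. queue the unref of the fb that was being scanned out until now */
	update_scanout(crtc, fb);
}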
Example no. 5
static void update_fb(struct drm_crtc *crtc, bool async,
		struct drm_framebuffer *new_fb)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_framebuffer *old_fb = mdp4_crtc->fb;

	if (old_fb)
		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);

	/* grab reference to incoming scanout fb: */
	drm_framebuffer_reference(new_fb);
	mdp4_crtc->base.fb = new_fb;
	mdp4_crtc->fb = new_fb;

	if (!async) {
		/* enable vblank to pick up the old_fb */
		mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
	}
}