Example #1
int rs600_irq_process(struct radeon_device *rdev)
{
	uint32_t status;
	uint32_t r500_disp_int;

	status = rs600_irq_ack(rdev, &r500_disp_int);
	if (!status && !r500_disp_int) {
		return IRQ_NONE;
	}
	while (status || r500_disp_int) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* Vertical blank interrupts */
		if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
			drm_handle_vblank(rdev->ddev, 0);
		}
		if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
			drm_handle_vblank(rdev->ddev, 1);
		}
		status = rs600_irq_ack(rdev, &r500_disp_int);
	}
	return IRQ_HANDLED;
}
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	u32 stat;
	u32 r500_disp_int;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return IRQ_NONE;

	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
	if (!stat)
		return IRQ_NONE;

	stat &= dev_priv->irq_enable_reg;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST)
		DRM_WAKEUP(&dev_priv->swi_queue);

	/* VBLANK interrupt */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 0);
		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 1);
	} else {
		if (stat & RADEON_CRTC_VBLANK_STAT)
			drm_handle_vblank(dev, 0);
		if (stat & RADEON_CRTC2_VBLANK_STAT)
			drm_handle_vblank(dev, 1);
	}
	return IRQ_HANDLED;
}
Example #3
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	u32 stat;
	u32 r500_disp_int;

	/* Only consider the bits we're interested in - others could be used
	 * outside the DRM
	 */
	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
	if (!stat)
		return IRQ_NONE;

	stat &= dev_priv->irq_enable_reg;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST)
		DRM_WAKEUP(&dev_priv->swi_queue);

	/* VBLANK interrupt */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 0);
		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 1);
	} else {
		if (stat & RADEON_CRTC_VBLANK_STAT)
			drm_handle_vblank(dev, 0);
		if (stat & RADEON_CRTC2_VBLANK_STAT)
			drm_handle_vblank(dev, 1);
	}
	return IRQ_HANDLED;
}
Example #4
irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = arg;
    drm_mach64_private_t *dev_priv = dev->dev_private;
    int status;

    status = MACH64_READ(MACH64_CRTC_INT_CNTL);

    /* VBLANK interrupt */
    if (status & MACH64_CRTC_VBLANK_INT) {
        /* Mask off all interrupt ack bits before setting the ack bit, since
         * there may be other handlers outside the DRM.
         *
         * NOTE: On mach64, you need to keep the enable bits set when doing
         * the ack, despite what the docs say about not acking and enabling
         * in a single write.
         */
        MACH64_WRITE(MACH64_CRTC_INT_CNTL,
                     (status & ~MACH64_CRTC_INT_ACKS)
                     | MACH64_CRTC_VBLANK_INT);

        atomic_inc(&dev_priv->vbl_received);
        drm_handle_vblank(dev, 0);
        return IRQ_HANDLED;
    }
    return IRQ_NONE;
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	int status;
	int handled = 0;

	status = MGA_READ(MGA_STATUS);

	/* VBLANK interrupt */
	if (status & MGA_VLINEPEN) {
		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
		atomic_inc(&dev_priv->vbl_received);
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	/* SOFTRAP interrupt */
	if (status & MGA_SOFTRAPEN) {
		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
		const u32 prim_end = MGA_READ(MGA_PRIMEND);


		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);

		/* In addition to clearing the interrupt-pending bit, we
		 * have to write to MGA_PRIMEND to re-start the DMA operation.
		 */
		if ((prim_start & ~0x03) != (prim_end & ~0x03))
			MGA_WRITE(MGA_PRIMEND, prim_end);

		atomic_inc(&dev_priv->last_fence_retired);
		DRM_WAKEUP(&dev_priv->fence_queue);
		handled = 1;
	}

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
static
void mdfld_generic_dsi_dbi_save(struct drm_encoder *encoder)
{
	struct mdfld_dsi_encoder *dsi_encoder;
	struct mdfld_dsi_config *dsi_config;
	struct drm_device *dev;
	int pipe;

	PSB_DEBUG_ENTRY("\n");

	if (!encoder)
		return;

	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
	dev = dsi_config->dev;
	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);

	DCLockMutex();
	mdfld_generic_dsi_dbi_set_power(encoder, false);

	drm_handle_vblank(dev, pipe);

	/* Turn off vsync (TE) interrupt. */
	drm_vblank_off(dev, pipe);

	/* Mark the pending flip request as completed. */
	DCUnAttachPipe(pipe);
	DC_MRFLD_onPowerOff(pipe);
	DCUnLockMutex();
}
static int
nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
{
	struct nouveau_drm *drm =
		container_of(event, struct nouveau_drm, vblank[head]);
	drm_handle_vblank(drm->dev, head);
	return NVKM_EVENT_KEEP;
}
/* vblank interrupt handler */
static void xilinx_drm_crtc_vblank_handler(void *data)
{
	struct drm_crtc *base_crtc = data;
	struct drm_device *drm;

	if (!base_crtc)
		return;

	drm = base_crtc->dev;

	drm_handle_vblank(drm, 0);
	xilinx_drm_crtc_finish_page_flip(base_crtc);
}
Example #9
static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
						       struct amdgpu_crtc, vblank_timer);
	struct drm_device *ddev = amdgpu_crtc->base.dev;
	struct amdgpu_device *adev = ddev->dev_private;

	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
	hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
		      HRTIMER_MODE_REL);

	return HRTIMER_NORESTART;
}
Example #10
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
	int status;

	status = R128_READ(R128_GEN_INT_STATUS);

	/* VBLANK interrupt */
	if (status & R128_CRTC_VBLANK_INT) {
		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
		atomic_inc(&dev_priv->vbl_received);
		drm_handle_vblank(dev, 0);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
Example #11
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_FRM) {
		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
		rcar_du_crtc_finish_page_flip(rcrtc);
		ret = IRQ_HANDLED;
	}

	return ret;
}
static void xylon_drm_crtc_vblank_handler(struct drm_crtc *base_crtc)
{
	struct drm_device *dev = base_crtc->dev;
	struct drm_pending_vblank_event *event;
	struct xylon_drm_crtc *crtc = to_xylon_crtc(base_crtc);
	unsigned long flags;

	drm_handle_vblank(dev, 0);

	spin_lock_irqsave(&dev->event_lock, flags);
	event = crtc->event;
	crtc->event = NULL;
	if (event) {
		drm_send_vblank_event(dev, 0, event);
		drm_vblank_put(dev, 0);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
static
void mdfld_generic_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
{
	struct mdfld_dsi_encoder *dsi_encoder;
	struct mdfld_dsi_dbi_output *dbi_output;
	struct drm_device *dev;
	struct mdfld_dsi_config *dsi_config;
	struct drm_psb_private *dev_priv;

	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
	if (!dsi_config) {
		DRM_ERROR("dsi_config is NULL\n");
		return;
	}
	dbi_output = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
	dev = dsi_config->dev;
	dev_priv = dev->dev_private;

	PSB_DEBUG_ENTRY("%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));

	mutex_lock(&dev_priv->dpms_mutex);
	DCLockMutex();

	if (mode == DRM_MODE_DPMS_ON) {
		mdfld_generic_dsi_dbi_set_power(encoder, true);
		DCAttachPipe(dsi_config->pipe);
		DC_MRFLD_onPowerOn(dsi_config->pipe);
	} else {
		mdfld_generic_dsi_dbi_set_power(encoder, false);

		drm_handle_vblank(dev, dsi_config->pipe);

		/* Turn off TE interrupt. */
		drm_vblank_off(dev, dsi_config->pipe);

		/* Mark the pending flip request as completed. */
		DCUnAttachPipe(dsi_config->pipe);
		DC_MRFLD_onPowerOff(dsi_config->pipe);
	}

	DCUnLockMutex();
	mutex_unlock(&dev_priv->dpms_mutex);
}
Example #14
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status, enable;

	enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
	mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp5_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);
}
Example #15
static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	unsigned int int_status;
	int ret;

	ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
	if (ret) {
		dev_err(dev->dev, "read DCU_INT_STATUS failed\n");
		return IRQ_NONE;
	}

	if (int_status & DCU_INT_STATUS_VBLANK)
		drm_handle_vblank(dev, 0);

	regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);

	return IRQ_HANDLED;
}
Example #16
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp_kms *mdp_kms = to_mdp_kms(kms);
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int id;
	uint32_t status;

	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	mdp_dispatch_irqs(mdp_kms, status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	return IRQ_HANDLED;
}
Example #17
static
void mdfld_dsi_dpi_save(struct drm_encoder *encoder)
{
	struct mdfld_dsi_encoder *dsi_encoder;
	struct mdfld_dsi_config *dsi_config;
	struct drm_device *dev;
	int pipe;

	if (!encoder)
		return;

	PSB_DEBUG_ENTRY("\n");

	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
	dev = dsi_config->dev;
	pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);

	DCLockMutex();

	/* Give the last flip time to take effect; if we disable the
	 * hardware too quickly, the overlay hardware may crash, causing
	 * a pipe hang the next time we try to use the overlay.
	 */
	msleep(50);
	DC_MRFLD_onPowerOff(pipe);
	msleep(50);
	__mdfld_dsi_dpi_set_power(encoder, false);

	drm_handle_vblank(dev, pipe);

	/* Turn off vsync interrupt. */
	drm_vblank_off(dev, pipe);

	/* Mark the pending flip request as completed. */
	DCUnAttachPipe(pipe);
	DCUnLockMutex();
}
static irqreturn_t shmob_drm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct shmob_drm_device *sdev = dev->dev_private;
	unsigned long flags;
	u32 status;

	/* Acknowledge interrupts. Putting interrupt enable and interrupt flag
	 * bits in the same register is really brain-dead design and requires
	 * taking a spinlock.
	 */
	spin_lock_irqsave(&sdev->irq_lock, flags);
	status = lcdc_read(sdev, LDINTR);
	lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
	spin_unlock_irqrestore(&sdev->irq_lock, flags);

	if (status & LDINTR_VES) {
		drm_handle_vblank(dev, 0);
		shmob_drm_crtc_finish_page_flip(&sdev->crtc);
	}

	return IRQ_HANDLED;
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	int status;
	int handled = 0;

	status = MGA_READ(MGA_STATUS);

	/* VBLANK interrupt */
	if (status & MGA_VLINEPEN) {
		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
		atomic_inc(&dev_priv->vbl_received);
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	/* SOFTRAP interrupt */
	if (status & MGA_SOFTRAPEN) {
		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
		const u32 prim_end = MGA_READ(MGA_PRIMEND);


		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);

		/* In addition to clearing the interrupt-pending bit, we
		 * have to write to MGA_PRIMEND to re-start the DMA operation.
		 */
		if ((prim_start & ~0x03) != (prim_end & ~0x03))
			MGA_WRITE(MGA_PRIMEND, prim_end);

		atomic_inc(&dev_priv->last_fence_retired);
		DRM_WAKEUP(&dev_priv->fence_queue);
		handled = 1;
	}

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
Example #20
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	u32 stat;
	u32 r500_disp_int;
	u32 tmp;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
		return IRQ_NONE;

	/* Only consider the bits we're interested in - others could be used
	 * outside the DRM
	 */
	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
	if (!stat)
		return IRQ_NONE;

	stat &= dev_priv->irq_enable_reg;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST)
		DRM_WAKEUP(&dev_priv->swi_queue);

	/* VBLANK interrupt */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 0);
		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
			drm_handle_vblank(dev, 1);
	} else {
		if (stat & RADEON_CRTC_VBLANK_STAT)
			drm_handle_vblank(dev, 0);
		if (stat & RADEON_CRTC2_VBLANK_STAT)
			drm_handle_vblank(dev, 1);
	}
	if (dev->msi_enabled) {
		switch(dev_priv->flags & RADEON_FAMILY_MASK) {
			case CHIP_RS400:
			case CHIP_RS480:
				tmp = RADEON_READ(RADEON_AIC_CNTL) &
				    ~RS400_MSI_REARM;
				RADEON_WRITE(RADEON_AIC_CNTL, tmp);
				RADEON_WRITE(RADEON_AIC_CNTL,
				    tmp | RS400_MSI_REARM);
				break;
			case CHIP_RS600:
			case CHIP_RS690:
			case CHIP_RS740:
				tmp = RADEON_READ(RADEON_BUS_CNTL) &
				    ~RS600_MSI_REARM;
				RADEON_WRITE(RADEON_BUS_CNTL, tmp);
				RADEON_WRITE(RADEON_BUS_CNTL, tmp |
				    RS600_MSI_REARM);
				break;
			 default:
				tmp = RADEON_READ(RADEON_MSI_REARM_EN) &
				    ~RV370_MSI_REARM_EN;
				RADEON_WRITE(RADEON_MSI_REARM_EN, tmp);
				RADEON_WRITE(RADEON_MSI_REARM_EN,
				    tmp | RV370_MSI_REARM_EN);
				break;
		}
	}
	return IRQ_HANDLED;
}
Example #21
static
void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
{
	struct mdfld_dsi_encoder *dsi_encoder;
	struct mdfld_dsi_config *dsi_config;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;
	struct mdfld_dsi_dpi_output *dpi_output;
	struct panel_funcs *p_funcs;

	dsi_encoder = MDFLD_DSI_ENCODER(encoder);
	dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
	if (!dsi_config) {
		DRM_ERROR("dsi_config is NULL\n");
		return;
	}
	dev = dsi_config->dev;
	dev_priv = dev->dev_private;

	dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
	p_funcs = dpi_output->p_funcs;

	PSB_DEBUG_ENTRY("%s\n", (mode == DRM_MODE_DPMS_ON ? "on" :
		DRM_MODE_DPMS_STANDBY == mode ? "standby" : "off"));

	mutex_lock(&dev_priv->dpms_mutex);
	DCLockMutex();

	if (mode == DRM_MODE_DPMS_ON) {
		mdfld_dsi_dpi_set_power(encoder, true);
		DCAttachPipe(dsi_config->pipe);
		DC_MRFLD_onPowerOn(dsi_config->pipe);

#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
		{
			struct mdfld_dsi_hw_context *ctx =
				&dsi_config->dsi_hw_context;
			struct backlight_device bd;
			bd.props.brightness = ctx->lastbrightnesslevel;
			psb_set_brightness(&bd);
		}
#endif
	} else if (mode == DRM_MODE_DPMS_STANDBY) {
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
		struct mdfld_dsi_hw_context *ctx = &dsi_config->dsi_hw_context;
		struct backlight_device bd;
		ctx->lastbrightnesslevel = psb_get_brightness(&bd);
		bd.props.brightness = 0;
		psb_set_brightness(&bd);
#endif
		/* Mark the pending flip request as completed. */
		DCUnAttachPipe(dsi_config->pipe);
		msleep(50);
		DC_MRFLD_onPowerOff(dsi_config->pipe);
		msleep(50);
	} else {
		mdfld_dsi_dpi_set_power(encoder, false);

		drm_handle_vblank(dev, dsi_config->pipe);

		/* Turn off TE interrupt. */
		drm_vblank_off(dev, dsi_config->pipe);

		/* Mark the pending flip request as completed. */
		DCUnAttachPipe(dsi_config->pipe);
		DC_MRFLD_onPowerOff(dsi_config->pipe);
	}

	DCUnLockMutex();
	mutex_unlock(&dev_priv->dpms_mutex);
}
void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc)
{
	struct drm_device *dev = pl111_crtc->crtc.dev;
	struct pl111_drm_flip_resource *old_flip_res;
	struct pl111_gem_bo *bo;
	unsigned long irq_flags;
	int flips_in_flight;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	unsigned long flags;
#endif

	spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags);

	/*
	 * Cache the flip resource that caused the IRQ since it will be
	 * dispatched later. Early return if the IRQ isn't associated with
	 * a base register update.
	 *
	 * TODO MIDBASE-2790: disable IRQs when a flip is not pending.
	 */
	old_flip_res = pl111_crtc->current_update_res;
	if (!old_flip_res) {
		spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);
		return;
	}
	pl111_crtc->current_update_res = NULL;

	/* Prepare the next flip (if any) of the queue as soon as possible. */
	if (!list_empty(&pl111_crtc->update_queue)) {
		struct pl111_drm_flip_resource *flip_res;
		/* Remove the head of the list */
		flip_res = list_first_entry(&pl111_crtc->update_queue,
			struct pl111_drm_flip_resource, link);
		list_del(&flip_res->link);
		do_flip_to_res(flip_res);
		/*
		 * current_update_res will be set, which guarantees that
		 * another flip_res coming in gets queued instead of
		 * handled immediately
		 */
	}
	spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);

	/* Finalize properly the flip that caused the IRQ */
	DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res);

	bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags);
	release_kds_resource_and_display(old_flip_res);
	spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags);
#endif
	/* Release DMA buffer on this flip */
	if (bo->gem_object.export_dma_buf != NULL)
		dma_buf_put(bo->gem_object.export_dma_buf);

	drm_handle_vblank(dev, pl111_crtc->crtc_index);

	/* Wake up any processes waiting for page flip event */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	if (old_flip_res->event) {
		spin_lock_bh(&dev->event_lock);
		drm_send_vblank_event(dev, pl111_crtc->crtc_index,
					old_flip_res->event);
		spin_unlock_bh(&dev->event_lock);
	}
#else
	if (old_flip_res->event) {
		struct drm_pending_vblank_event *e = old_flip_res->event;
		struct timeval now;
		unsigned int seq;

		DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__,
				old_flip_res->event);

		spin_lock_bh(&dev->event_lock);
		seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index,
							&now);
		e->pipe = pl111_crtc->crtc_index;
		e->event.sequence = seq;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;

		list_add_tail(&e->base.link,
			&e->base.file_priv->event_list);

		wake_up_interruptible(&e->base.file_priv->event_wait);
		spin_unlock_bh(&dev->event_lock);
	}
#endif

	drm_vblank_put(dev, pl111_crtc->crtc_index);

	/*
	 * workqueue.c:process_one_work():
	 * "It is permissible to free the struct work_struct from
	 *  inside the function that is called from it"
	 */
	kmem_cache_free(priv.page_flip_slab, old_flip_res);

	flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight);
	if (flips_in_flight == 0 ||
			flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1))
		wake_up(&priv.wait_for_flips);

	DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res);
}